/* xref: /linux/drivers/gpu/drm/i915/i915_debugfs.c (revision 2b64b2ed277ff23e785fbdb65098ee7e1252d64f) */
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"

#include "i915_reset.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

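/*
 * i915_capabilities - report device generation, platform, PCH type, device
 * info flags, driver caps and the active module parameters.
 *
 * Like the other seq_file callbacks below, this backs a read-only debugfs
 * node, e.g. "cat /sys/kernel/debug/dri/0/i915_capabilities" (path assumed;
 * the DRM minor number depends on the system).
 */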
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

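/*
 * One-character status flags used by describe_obj(): '*' object is active,
 * 'p' pinned for display (global), 'X'/'Y' tiling mode, 'g' has an active
 * GGTT userfault mapping, 'M' has a kernel vmap/iomap.
 */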
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	for_each_ggtt_vma(vma, obj) {
		if (drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

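/*
 * stringify_page_sizes - decode a mask of I915_GTT_PAGE_SIZE_* bits.
 * Single sizes return a string literal; combinations are written into the
 * caller's buffer, so @buf may only be NULL when at most one bit can be set.
 */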
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x - 2] = '\0';

		return buf;
	}
}

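/*
 * describe_obj - emit a one-line summary of a GEM object: status flags,
 * size, domains, cache level and every VMA it is bound into. The caller
 * must hold struct_mutex (asserted below).
 */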
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_puts(m, " (global)");
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, ", fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

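/*
 * Sort comparator used by i915_gem_stolen_list_info() to order objects by
 * their start offset within stolen memory.
 */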
static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}

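/*
 * Per-client memory accounting. per_file_stats() is called once per GEM
 * object (directly, or for every handle via idr_for_each) and sums the
 * object's footprint into the matching buckets.
 */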
struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
	u64 closed;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, i915, id) {
			struct intel_context *ce = to_intel_context(ctx, engine);

			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s/%d",
				 task ? task->comm : "<unknown>",
				 ctx->user_handle);
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}

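/*
 * i915_gem_object_info - top-level memory overview: global object counts,
 * bound/unbound/purgeable/mapped/huge totals, then per-context and
 * per-client breakdowns.
 */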
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long nobject, n;
	int count, ret;

	nobject = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out; /* don't leak the objects array on error */

	count = 0;
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		objects[count++] = obj;
		if (count == nobject)
			break;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	total_obj_size = total_gtt_size = 0;
	for (n = 0; n < count; n++) {
		obj = objects[n];

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
out:
	kvfree(objects);
	return ret;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

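/*
 * gen8_display_interrupt_info - dump the gen8+ display engine interrupt
 * registers, taking a display power reference per pipe so powered-down
 * pipes are reported as such instead of being read.
 */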
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;
			intel_wakeref_t pref;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(i915, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_state *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

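/*
 * i915_frequency_info - RPS/turbo state. The layout differs per platform:
 * ILK (gen5) uses MEMSWCTL/MEMSTAT, VLV/CHV go through the punit, and
 * gen6+ reads the RP* registers directly (which requires forcewake).
 */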
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}

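/*
 * i915_instdone_info - pretty-print an intel_instdone snapshot; the
 * per-slice/subslice sampler and row arrays only exist on gen7+ hardware.
 */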
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_seqno(engine);
		}

		intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RS3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   i915->uncore.user_forcewake.count);

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}

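/*
 * FBC false-color debug knob. Writing a non-zero value sets
 * FBC_CTL_FALSE_COLOR so compressed vs uncompressed regions become
 * visually distinguishable on screen, e.g.
 * "echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color" (node name
 * assumed from the usual i915 debugfs registration).
 */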
static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

1722 static int i915_emon_status(struct seq_file *m, void *unused)
1723 {
1724 	struct drm_i915_private *i915 = node_to_i915(m->private);
1725 	intel_wakeref_t wakeref;
1726 
1727 	if (!IS_GEN(i915, 5))
1728 		return -ENODEV;
1729 
1730 	with_intel_runtime_pm(i915, wakeref) {
1731 		unsigned long temp, chipset, gfx;
1732 
1733 		temp = i915_mch_val(i915);
1734 		chipset = i915_chipset_val(i915);
1735 		gfx = i915_gfx_val(i915);
1736 
1737 		seq_printf(m, "GMCH temp: %ld\n", temp);
1738 		seq_printf(m, "Chipset power: %ld\n", chipset);
1739 		seq_printf(m, "GFX power: %ld\n", gfx);
1740 		seq_printf(m, "Total power: %ld\n", chipset + gfx);
1741 	}
1742 
1743 	return 0;
1744 }
1745 
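/*
 * Dump the GT/ring frequency table: for each GPU frequency, query pcode
 * (GEN6_PCODE_READ_MIN_FREQ_TABLE) for the minimum IA and ring clocks it
 * will request alongside it. Both returned bytes are in 100 MHz units,
 * hence the "* 100" when printing.
 */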
1746 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1747 {
1748 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1749 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1750 	unsigned int max_gpu_freq, min_gpu_freq;
1751 	intel_wakeref_t wakeref;
1752 	int gpu_freq, ia_freq;
1753 	int ret;
1754 
1755 	if (!HAS_LLC(dev_priv))
1756 		return -ENODEV;
1757 
1758 	wakeref = intel_runtime_pm_get(dev_priv);
1759 
1760 	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1761 	if (ret)
1762 		goto out;
1763 
1764 	min_gpu_freq = rps->min_freq;
1765 	max_gpu_freq = rps->max_freq;
1766 	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1767 		/* Convert GT frequency to 50 MHz units */
1768 		min_gpu_freq /= GEN9_FREQ_SCALER;
1769 		max_gpu_freq /= GEN9_FREQ_SCALER;
1770 	}
1771 
1772 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1773 
1774 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1775 		ia_freq = gpu_freq;
1776 		sandybridge_pcode_read(dev_priv,
1777 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1778 				       &ia_freq);
1779 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1780 			   intel_gpu_freq(dev_priv, (gpu_freq *
1781 						     (IS_GEN9_BC(dev_priv) ||
1782 						      INTEL_GEN(dev_priv) >= 10 ?
1783 						      GEN9_FREQ_SCALER : 1))),
1784 			   ((ia_freq >> 0) & 0xff) * 100,
1785 			   ((ia_freq >> 8) & 0xff) * 100);
1786 	}
1787 
1788 	mutex_unlock(&dev_priv->pcu_lock);
1789 
1790 out:
1791 	intel_runtime_pm_put(dev_priv, wakeref);
1792 	return ret;
1793 }
1794 
1795 static int i915_opregion(struct seq_file *m, void *unused)
1796 {
1797 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1798 	struct drm_device *dev = &dev_priv->drm;
1799 	struct intel_opregion *opregion = &dev_priv->opregion;
1800 	int ret;
1801 
1802 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1803 	if (ret)
1804 		goto out;
1805 
1806 	if (opregion->header)
1807 		seq_write(m, opregion->header, OPREGION_SIZE);
1808 
1809 	mutex_unlock(&dev->struct_mutex);
1810 
1811 out:
1812 	return ret;
1813 }
1814 
1815 static int i915_vbt(struct seq_file *m, void *unused)
1816 {
1817 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1818 
1819 	if (opregion->vbt)
1820 		seq_write(m, opregion->vbt, opregion->vbt_size);
1821 
1822 	return 0;
1823 }
1824 
1825 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1826 {
1827 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1828 	struct drm_device *dev = &dev_priv->drm;
1829 	struct intel_framebuffer *fbdev_fb = NULL;
1830 	struct drm_framebuffer *drm_fb;
1831 	int ret;
1832 
1833 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1834 	if (ret)
1835 		return ret;
1836 
1837 #ifdef CONFIG_DRM_FBDEV_EMULATION
1838 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1839 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1840 
1841 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1842 			   fbdev_fb->base.width,
1843 			   fbdev_fb->base.height,
1844 			   fbdev_fb->base.format->depth,
1845 			   fbdev_fb->base.format->cpp[0] * 8,
1846 			   fbdev_fb->base.modifier,
1847 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1848 		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1849 		seq_putc(m, '\n');
1850 	}
1851 #endif
1852 
1853 	mutex_lock(&dev->mode_config.fb_lock);
1854 	drm_for_each_fb(drm_fb, dev) {
1855 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1856 		if (fb == fbdev_fb)
1857 			continue;
1858 
1859 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1860 			   fb->base.width,
1861 			   fb->base.height,
1862 			   fb->base.format->depth,
1863 			   fb->base.format->cpp[0] * 8,
1864 			   fb->base.modifier,
1865 			   drm_framebuffer_read_refcount(&fb->base));
1866 		describe_obj(m, intel_fb_obj(&fb->base));
1867 		seq_putc(m, '\n');
1868 	}
1869 	mutex_unlock(&dev->mode_config.fb_lock);
1870 	mutex_unlock(&dev->struct_mutex);
1871 
1872 	return 0;
1873 }
1874 
1875 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1876 {
1877 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1878 		   ring->space, ring->head, ring->tail, ring->emit);
1879 }
1880 
1881 static int i915_context_status(struct seq_file *m, void *unused)
1882 {
1883 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1884 	struct drm_device *dev = &dev_priv->drm;
1885 	struct intel_engine_cs *engine;
1886 	struct i915_gem_context *ctx;
1887 	enum intel_engine_id id;
1888 	int ret;
1889 
1890 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1891 	if (ret)
1892 		return ret;
1893 
1894 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1895 		seq_puts(m, "HW context ");
1896 		if (!list_empty(&ctx->hw_id_link))
1897 			seq_printf(m, "%x [pin %u]", ctx->hw_id,
1898 				   atomic_read(&ctx->hw_id_pin_count));
1899 		if (ctx->pid) {
1900 			struct task_struct *task;
1901 
1902 			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1903 			if (task) {
1904 				seq_printf(m, "(%s [%d]) ",
1905 					   task->comm, task->pid);
1906 				put_task_struct(task);
1907 			}
1908 		} else if (IS_ERR(ctx->file_priv)) {
1909 			seq_puts(m, "(deleted) ");
1910 		} else {
1911 			seq_puts(m, "(kernel) ");
1912 		}
1913 
1914 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1915 		seq_putc(m, '\n');
1916 
1917 		for_each_engine(engine, dev_priv, id) {
1918 			struct intel_context *ce =
1919 				to_intel_context(ctx, engine);
1920 
1921 			seq_printf(m, "%s: ", engine->name);
1922 			if (ce->state)
1923 				describe_obj(m, ce->state->obj);
1924 			if (ce->ring)
1925 				describe_ctx_ring(m, ce->ring);
1926 			seq_putc(m, '\n');
1927 		}
1928 
1929 		seq_putc(m, '\n');
1930 	}
1931 
1932 	mutex_unlock(&dev->struct_mutex);
1933 
1934 	return 0;
1935 }
1936 
1937 static const char *swizzle_string(unsigned swizzle)
1938 {
1939 	switch (swizzle) {
1940 	case I915_BIT_6_SWIZZLE_NONE:
1941 		return "none";
1942 	case I915_BIT_6_SWIZZLE_9:
1943 		return "bit9";
1944 	case I915_BIT_6_SWIZZLE_9_10:
1945 		return "bit9/bit10";
1946 	case I915_BIT_6_SWIZZLE_9_11:
1947 		return "bit9/bit11";
1948 	case I915_BIT_6_SWIZZLE_9_10_11:
1949 		return "bit9/bit10/bit11";
1950 	case I915_BIT_6_SWIZZLE_9_17:
1951 		return "bit9/bit17";
1952 	case I915_BIT_6_SWIZZLE_9_10_17:
1953 		return "bit9/bit10/bit17";
1954 	case I915_BIT_6_SWIZZLE_UNKNOWN:
1955 		return "unknown";
1956 	}
1957 
1958 	return "bug";
1959 }
1960 
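/*
 * Report the bit-6 swizzle pattern applied to X/Y tiled objects together
 * with the DRAM configuration registers it was detected from (DCC on
 * gen3/4, MAD_DIMM/ARB_MODE on gen6+).
 */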
1961 static int i915_swizzle_info(struct seq_file *m, void *data)
1962 {
1963 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1964 	intel_wakeref_t wakeref;
1965 
1966 	wakeref = intel_runtime_pm_get(dev_priv);
1967 
1968 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1969 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1970 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1971 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1972 
1973 	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1974 		seq_printf(m, "DCC = 0x%08x\n",
1975 			   I915_READ(DCC));
1976 		seq_printf(m, "DCC2 = 0x%08x\n",
1977 			   I915_READ(DCC2));
1978 		seq_printf(m, "C0DRB3 = 0x%04x\n",
1979 			   I915_READ16(C0DRB3));
1980 		seq_printf(m, "C1DRB3 = 0x%04x\n",
1981 			   I915_READ16(C1DRB3));
1982 	} else if (INTEL_GEN(dev_priv) >= 6) {
1983 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1984 			   I915_READ(MAD_DIMM_C0));
1985 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1986 			   I915_READ(MAD_DIMM_C1));
1987 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1988 			   I915_READ(MAD_DIMM_C2));
1989 		seq_printf(m, "TILECTL = 0x%08x\n",
1990 			   I915_READ(TILECTL));
1991 		if (INTEL_GEN(dev_priv) >= 8)
1992 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1993 				   I915_READ(GAMTARBMODE));
1994 		else
1995 			seq_printf(m, "ARB_MODE = 0x%08x\n",
1996 				   I915_READ(ARB_MODE));
1997 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1998 			   I915_READ(DISP_ARB_CTL));
1999 	}
2000 
2001 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2002 		seq_puts(m, "L-shaped memory detected\n");
2003 
2004 	intel_runtime_pm_put(dev_priv, wakeref);
2005 
2006 	return 0;
2007 }
2008 
2009 static const char *rps_power_to_str(unsigned int power)
2010 {
2011 	static const char * const strings[] = {
2012 		[LOW_POWER] = "low power",
2013 		[BETWEEN] = "mixed",
2014 		[HIGH_POWER] = "high power",
2015 	};
2016 
2017 	if (power >= ARRAY_SIZE(strings) || !strings[power])
2018 		return "unknown";
2019 
2020 	return strings[power];
2021 }
2022 
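/*
 * RPS state plus per-client boost counts. The autotuning figures at the
 * bottom come from the GEN6_RP_CUR_* event counters: 100 * up / up_ei is
 * roughly the portion of the evaluation interval spent above the up
 * threshold (and likewise for down).
 */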
2023 static int i915_rps_boost_info(struct seq_file *m, void *data)
2024 {
2025 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2026 	struct drm_device *dev = &dev_priv->drm;
2027 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
2028 	u32 act_freq = rps->cur_freq;
2029 	intel_wakeref_t wakeref;
2030 	struct drm_file *file;
2031 
2032 	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
2033 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2034 			mutex_lock(&dev_priv->pcu_lock);
2035 			act_freq = vlv_punit_read(dev_priv,
2036 						  PUNIT_REG_GPU_FREQ_STS);
2037 			act_freq = (act_freq >> 8) & 0xff;
2038 			mutex_unlock(&dev_priv->pcu_lock);
2039 		} else {
2040 			act_freq = intel_get_cagf(dev_priv,
2041 						  I915_READ(GEN6_RPSTAT1));
2042 		}
2043 	}
2044 
2045 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2046 	seq_printf(m, "GPU busy? %s [%d requests]\n",
2047 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2048 	seq_printf(m, "Boosts outstanding? %d\n",
2049 		   atomic_read(&rps->num_waiters));
2050 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
2051 	seq_printf(m, "Frequency requested %d, actual %d\n",
2052 		   intel_gpu_freq(dev_priv, rps->cur_freq),
2053 		   intel_gpu_freq(dev_priv, act_freq));
2054 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2055 		   intel_gpu_freq(dev_priv, rps->min_freq),
2056 		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2057 		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2058 		   intel_gpu_freq(dev_priv, rps->max_freq));
2059 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2060 		   intel_gpu_freq(dev_priv, rps->idle_freq),
2061 		   intel_gpu_freq(dev_priv, rps->efficient_freq),
2062 		   intel_gpu_freq(dev_priv, rps->boost_freq));
2063 
2064 	mutex_lock(&dev->filelist_mutex);
2065 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2066 		struct drm_i915_file_private *file_priv = file->driver_priv;
2067 		struct task_struct *task;
2068 
2069 		rcu_read_lock();
2070 		task = pid_task(file->pid, PIDTYPE_PID);
2071 		seq_printf(m, "%s [%d]: %d boosts\n",
2072 			   task ? task->comm : "<unknown>",
2073 			   task ? task->pid : -1,
2074 			   atomic_read(&file_priv->rps_client.boosts));
2075 		rcu_read_unlock();
2076 	}
2077 	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2078 		   atomic_read(&rps->boosts));
2079 	mutex_unlock(&dev->filelist_mutex);
2080 
2081 	if (INTEL_GEN(dev_priv) >= 6 &&
2082 	    rps->enabled &&
2083 	    dev_priv->gt.active_requests) {
2084 		u32 rpup, rpupei;
2085 		u32 rpdown, rpdownei;
2086 
2087 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2088 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2089 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2090 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2091 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2092 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2093 
2094 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2095 			   rps_power_to_str(rps->power.mode));
2096 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2097 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
2098 			   rps->power.up_threshold);
2099 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2100 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2101 			   rps->power.down_threshold);
2102 	} else {
2103 		seq_puts(m, "\nRPS Autotuning inactive\n");
2104 	}
2105 
2106 	return 0;
2107 }
2108 
2109 static int i915_llc(struct seq_file *m, void *data)
2110 {
2111 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2112 	const bool edram = INTEL_GEN(dev_priv) > 8;
2113 
2114 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2115 	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2116 		   intel_uncore_edram_size(dev_priv) / 1024 / 1024);
2117 
2118 	return 0;
2119 }
2120 
2121 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2122 {
2123 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2124 	intel_wakeref_t wakeref;
2125 	struct drm_printer p;
2126 
2127 	if (!HAS_HUC(dev_priv))
2128 		return -ENODEV;
2129 
2130 	p = drm_seq_file_printer(m);
2131 	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2132 
2133 	with_intel_runtime_pm(dev_priv, wakeref)
2134 		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2135 
2136 	return 0;
2137 }
2138 
2139 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2140 {
2141 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2142 	intel_wakeref_t wakeref;
2143 	struct drm_printer p;
2144 
2145 	if (!HAS_GUC(dev_priv))
2146 		return -ENODEV;
2147 
2148 	p = drm_seq_file_printer(m);
2149 	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2150 
2151 	with_intel_runtime_pm(dev_priv, wakeref) {
2152 		u32 tmp = I915_READ(GUC_STATUS);
2153 		u32 i;
2154 
2155 		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2156 		seq_printf(m, "\tBootrom status = 0x%x\n",
2157 			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2158 		seq_printf(m, "\tuKernel status = 0x%x\n",
2159 			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2160 		seq_printf(m, "\tMIA Core status = 0x%x\n",
2161 			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2162 		seq_puts(m, "\nScratch registers:\n");
2163 		for (i = 0; i < 16; i++) {
2164 			seq_printf(m, "\t%2d: \t0x%x\n",
2165 				   i, I915_READ(SOFT_SCRATCH(i)));
2166 		}
2167 	}
2168 
2169 	return 0;
2170 }
2171 
2172 static const char *
2173 stringify_guc_log_type(enum guc_log_buffer_type type)
2174 {
2175 	switch (type) {
2176 	case GUC_ISR_LOG_BUFFER:
2177 		return "ISR";
2178 	case GUC_DPC_LOG_BUFFER:
2179 		return "DPC";
2180 	case GUC_CRASH_DUMP_LOG_BUFFER:
2181 		return "CRASH";
2182 	default:
2183 		MISSING_CASE(type);
2184 	}
2185 
2186 	return "";
2187 }
2188 
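/*
 * Summarize GuC log relay activity: how often the relay buffer filled and,
 * per log buffer type, the flush and sampled-overflow counts seen so far.
 */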
2189 static void i915_guc_log_info(struct seq_file *m,
2190 			      struct drm_i915_private *dev_priv)
2191 {
2192 	struct intel_guc_log *log = &dev_priv->guc.log;
2193 	enum guc_log_buffer_type type;
2194 
2195 	if (!intel_guc_log_relay_enabled(log)) {
2196 		seq_puts(m, "GuC log relay disabled\n");
2197 		return;
2198 	}
2199 
2200 	seq_puts(m, "GuC logging stats:\n");
2201 
2202 	seq_printf(m, "\tRelay full count: %u\n",
2203 		   log->relay.full_count);
2204 
2205 	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2206 		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2207 			   stringify_guc_log_type(type),
2208 			   log->stats[type].flush,
2209 			   log->stats[type].sampled_overflow);
2210 	}
2211 }
2212 
2213 static void i915_guc_client_info(struct seq_file *m,
2214 				 struct drm_i915_private *dev_priv,
2215 				 struct intel_guc_client *client)
2216 {
2217 	struct intel_engine_cs *engine;
2218 	enum intel_engine_id id;
2219 	u64 tot = 0;
2220 
2221 	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2222 		   client->priority, client->stage_id, client->proc_desc_offset);
2223 	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2224 		   client->doorbell_id, client->doorbell_offset);
2225 
2226 	for_each_engine(engine, dev_priv, id) {
2227 		u64 submissions = client->submissions[id];
2228 		tot += submissions;
2229 		seq_printf(m, "\tSubmissions: %llu %s\n",
2230 			   submissions, engine->name);
2231 	}
2232 	seq_printf(m, "\tTotal: %llu\n", tot);
2233 }
2234 
2235 static int i915_guc_info(struct seq_file *m, void *data)
2236 {
2237 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2238 	const struct intel_guc *guc = &dev_priv->guc;
2239 
2240 	if (!USES_GUC(dev_priv))
2241 		return -ENODEV;
2242 
2243 	i915_guc_log_info(m, dev_priv);
2244 
2245 	if (!USES_GUC_SUBMISSION(dev_priv))
2246 		return 0;
2247 
2248 	GEM_BUG_ON(!guc->execbuf_client);
2249 
2250 	seq_puts(m, "\nDoorbell map:\n");
2251 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2252 	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2253 
2254 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2255 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2256 	if (guc->preempt_client) {
2257 		seq_printf(m, "\nGuC preempt client @ %p:\n",
2258 			   guc->preempt_client);
2259 		i915_guc_client_info(m, dev_priv, guc->preempt_client);
2260 	}
2261 
2262 	/* Add more as required ... */
2263 
2264 	return 0;
2265 }
2266 
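/*
 * Walk the GuC stage descriptor pool and dump every active descriptor,
 * including the per-engine execlist context (LRC) state the GuC uses for
 * submission on the execbuf client's engines.
 */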
2267 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2268 {
2269 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2270 	const struct intel_guc *guc = &dev_priv->guc;
2271 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2272 	struct intel_guc_client *client = guc->execbuf_client;
2273 	unsigned int tmp;
2274 	int index;
2275 
2276 	if (!USES_GUC_SUBMISSION(dev_priv))
2277 		return -ENODEV;
2278 
2279 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2280 		struct intel_engine_cs *engine;
2281 
2282 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2283 			continue;
2284 
2285 		seq_printf(m, "GuC stage descriptor %u:\n", index);
2286 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2287 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2288 		seq_printf(m, "\tPriority: %d\n", desc->priority);
2289 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2290 		seq_printf(m, "\tEngines used: 0x%x\n",
2291 			   desc->engines_used);
2292 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2293 			   desc->db_trigger_phy,
2294 			   desc->db_trigger_cpu,
2295 			   desc->db_trigger_uk);
2296 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
2297 			   desc->process_desc);
2298 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2299 			   desc->wq_addr, desc->wq_size);
2300 		seq_putc(m, '\n');
2301 
2302 		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2303 			u32 guc_engine_id = engine->guc_id;
2304 			struct guc_execlist_context *lrc =
2305 						&desc->lrc[guc_engine_id];
2306 
2307 			seq_printf(m, "\t%s LRC:\n", engine->name);
2308 			seq_printf(m, "\t\tContext desc: 0x%x\n",
2309 				   lrc->context_desc);
2310 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2311 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2312 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2313 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2314 			seq_putc(m, '\n');
2315 		}
2316 	}
2317 
2318 	return 0;
2319 }
2320 
2321 static int i915_guc_log_dump(struct seq_file *m, void *data)
2322 {
2323 	struct drm_info_node *node = m->private;
2324 	struct drm_i915_private *dev_priv = node_to_i915(node);
2325 	bool dump_load_err = !!node->info_ent->data;
2326 	struct drm_i915_gem_object *obj = NULL;
2327 	u32 *log;
2328 	int i = 0;
2329 
2330 	if (!HAS_GUC(dev_priv))
2331 		return -ENODEV;
2332 
2333 	if (dump_load_err)
2334 		obj = dev_priv->guc.load_err_log;
2335 	else if (dev_priv->guc.log.vma)
2336 		obj = dev_priv->guc.log.vma->obj;
2337 
2338 	if (!obj)
2339 		return 0;
2340 
2341 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2342 	if (IS_ERR(log)) {
2343 		DRM_DEBUG("Failed to pin object\n");
2344 		seq_puts(m, "(log data inaccessible)\n");
2345 		return PTR_ERR(log);
2346 	}
2347 
2348 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2349 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2350 			   *(log + i), *(log + i + 1),
2351 			   *(log + i + 2), *(log + i + 3));
2352 
2353 	seq_putc(m, '\n');
2354 
2355 	i915_gem_object_unpin_map(obj);
2356 
2357 	return 0;
2358 }
2359 
2360 static int i915_guc_log_level_get(void *data, u64 *val)
2361 {
2362 	struct drm_i915_private *dev_priv = data;
2363 
2364 	if (!USES_GUC(dev_priv))
2365 		return -ENODEV;
2366 
2367 	*val = intel_guc_log_get_level(&dev_priv->guc.log);
2368 
2369 	return 0;
2370 }
2371 
2372 static int i915_guc_log_level_set(void *data, u64 val)
2373 {
2374 	struct drm_i915_private *dev_priv = data;
2375 
2376 	if (!USES_GUC(dev_priv))
2377 		return -ENODEV;
2378 
2379 	return intel_guc_log_set_level(&dev_priv->guc.log, val);
2380 }
2381 
2382 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2383 			i915_guc_log_level_get, i915_guc_log_level_set,
2384 			"%lld\n");
2385 
2386 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2387 {
2388 	struct drm_i915_private *dev_priv = inode->i_private;
2389 
2390 	if (!USES_GUC(dev_priv))
2391 		return -ENODEV;
2392 
2393 	file->private_data = &dev_priv->guc.log;
2394 
2395 	return intel_guc_log_relay_open(&dev_priv->guc.log);
2396 }
2397 
2398 static ssize_t
2399 i915_guc_log_relay_write(struct file *filp,
2400 			 const char __user *ubuf,
2401 			 size_t cnt,
2402 			 loff_t *ppos)
2403 {
2404 	struct intel_guc_log *log = filp->private_data;
2405 
2406 	intel_guc_log_relay_flush(log);
2407 
2408 	return cnt;
2409 }
2410 
2411 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2412 {
2413 	struct drm_i915_private *dev_priv = inode->i_private;
2414 
2415 	intel_guc_log_relay_close(&dev_priv->guc.log);
2416 
2417 	return 0;
2418 }
2419 
2420 static const struct file_operations i915_guc_log_relay_fops = {
2421 	.owner = THIS_MODULE,
2422 	.open = i915_guc_log_relay_open,
2423 	.write = i915_guc_log_relay_write,
2424 	.release = i915_guc_log_relay_release,
2425 };
2426 
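/*
 * Decode the sink-side PSR state machine by reading the DP_PSR_STATUS DPCD
 * register over AUX; the sink_status[] table below follows the
 * DP_PSR_SINK_STATE_MASK values from the DisplayPort spec.
 */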
2427 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2428 {
2429 	u8 val;
2430 	static const char * const sink_status[] = {
2431 		"inactive",
2432 		"transition to active, capture and display",
2433 		"active, display from RFB",
2434 		"active, capture and display on sink device timings",
2435 		"transition to inactive, capture and display, timing re-sync",
2436 		"reserved",
2437 		"reserved",
2438 		"sink internal error",
2439 	};
2440 	struct drm_connector *connector = m->private;
2441 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2442 	struct intel_dp *intel_dp =
2443 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2444 	int ret;
2445 
2446 	if (!CAN_PSR(dev_priv)) {
2447 		seq_puts(m, "PSR Unsupported\n");
2448 		return -ENODEV;
2449 	}
2450 
2451 	if (connector->status != connector_status_connected)
2452 		return -ENODEV;
2453 
2454 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2455 
2456 	if (ret == 1) {
2457 		const char *str = "unknown";
2458 
2459 		val &= DP_PSR_SINK_STATE_MASK;
2460 		if (val < ARRAY_SIZE(sink_status))
2461 			str = sink_status[val];
2462 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2463 	} else {
2464 		return ret < 0 ? ret : -EIO; /* short AUX read */
2465 	}
2466 
2467 	return 0;
2468 }
2469 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2470 
2471 static void
2472 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2473 {
2474 	u32 val, status_val;
2475 	const char *status = "unknown";
2476 
2477 	if (dev_priv->psr.psr2_enabled) {
2478 		static const char * const live_status[] = {
2479 			"IDLE",
2480 			"CAPTURE",
2481 			"CAPTURE_FS",
2482 			"SLEEP",
2483 			"BUFON_FW",
2484 			"ML_UP",
2485 			"SU_STANDBY",
2486 			"FAST_SLEEP",
2487 			"DEEP_SLEEP",
2488 			"BUF_ON",
2489 			"TG_ON"
2490 		};
2491 		val = I915_READ(EDP_PSR2_STATUS);
2492 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2493 			      EDP_PSR2_STATUS_STATE_SHIFT;
2494 		if (status_val < ARRAY_SIZE(live_status))
2495 			status = live_status[status_val];
2496 	} else {
2497 		static const char * const live_status[] = {
2498 			"IDLE",
2499 			"SRDONACK",
2500 			"SRDENT",
2501 			"BUFOFF",
2502 			"BUFON",
2503 			"AUXACK",
2504 			"SRDOFFACK",
2505 			"SRDENT_ON",
2506 		};
2507 		val = I915_READ(EDP_PSR_STATUS);
2508 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2509 			      EDP_PSR_STATUS_STATE_SHIFT;
2510 		if (status_val < ARRAY_SIZE(live_status))
2511 			status = live_status[status_val];
2512 	}
2513 
2514 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2515 }
2516 
2517 static int i915_edp_psr_status(struct seq_file *m, void *data)
2518 {
2519 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2520 	struct i915_psr *psr = &dev_priv->psr;
2521 	intel_wakeref_t wakeref;
2522 	const char *status;
2523 	bool enabled;
2524 	u32 val;
2525 
2526 	if (!HAS_PSR(dev_priv))
2527 		return -ENODEV;
2528 
2529 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2530 	if (psr->dp)
2531 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2532 	seq_puts(m, "\n");
2533 
2534 	if (!psr->sink_support)
2535 		return 0;
2536 
2537 	wakeref = intel_runtime_pm_get(dev_priv);
2538 	mutex_lock(&psr->lock);
2539 
2540 	if (psr->enabled)
2541 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2542 	else
2543 		status = "disabled";
2544 	seq_printf(m, "PSR mode: %s\n", status);
2545 
2546 	if (!psr->enabled)
2547 		goto unlock;
2548 
2549 	if (psr->psr2_enabled) {
2550 		val = I915_READ(EDP_PSR2_CTL);
2551 		enabled = val & EDP_PSR2_ENABLE;
2552 	} else {
2553 		val = I915_READ(EDP_PSR_CTL);
2554 		enabled = val & EDP_PSR_ENABLE;
2555 	}
2556 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2557 		   enableddisabled(enabled), val);
2558 	psr_source_status(dev_priv, m);
2559 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2560 		   psr->busy_frontbuffer_bits);
2561 
2562 	/*
2563 	 * SKL+ perf counter is reset to 0 every time a DC state is entered
2564 	 */
2565 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2566 		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2567 		seq_printf(m, "Performance counter: %u\n", val);
2568 	}
2569 
2570 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
2571 		seq_printf(m, "Last attempted entry at: %lld\n",
2572 			   psr->last_entry_attempt);
2573 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2574 	}
2575 
2576 	if (psr->psr2_enabled) {
2577 		u32 su_frames_val[3];
2578 		int frame;
2579 
2580 		/*
2581 		 * Read all three registers up front to minimize the chance of
2582 		 * crossing a frame boundary between the reads.
2583 		 */
2584 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2585 			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2586 
2587 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2588 
2589 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2590 			u32 su_blocks;
2591 
2592 			su_blocks = su_frames_val[frame / 3] &
2593 				    PSR2_SU_STATUS_MASK(frame);
2594 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2595 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
2596 		}
2597 	}
2598 
2599 unlock:
2600 	mutex_unlock(&psr->lock);
2601 	intel_runtime_pm_put(dev_priv, wakeref);
2602 
2603 	return 0;
2604 }
2605 
2606 static int
2607 i915_edp_psr_debug_set(void *data, u64 val)
2608 {
2609 	struct drm_i915_private *dev_priv = data;
2610 	struct drm_modeset_acquire_ctx ctx;
2611 	intel_wakeref_t wakeref;
2612 	int ret;
2613 
2614 	if (!CAN_PSR(dev_priv))
2615 		return -ENODEV;
2616 
2617 	DRM_DEBUG_KMS("Setting PSR debug to 0x%llx\n", val);
2618 
2619 	wakeref = intel_runtime_pm_get(dev_priv);
2620 
2621 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2622 
2623 retry:
2624 	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
2625 	if (ret == -EDEADLK) {
2626 		ret = drm_modeset_backoff(&ctx);
2627 		if (!ret)
2628 			goto retry;
2629 	}
2630 
2631 	drm_modeset_drop_locks(&ctx);
2632 	drm_modeset_acquire_fini(&ctx);
2633 
2634 	intel_runtime_pm_put(dev_priv, wakeref);
2635 
2636 	return ret;
2637 }
2638 
2639 static int
2640 i915_edp_psr_debug_get(void *data, u64 *val)
2641 {
2642 	struct drm_i915_private *dev_priv = data;
2643 
2644 	if (!CAN_PSR(dev_priv))
2645 		return -ENODEV;
2646 
2647 	*val = READ_ONCE(dev_priv->psr.debug);
2648 	return 0;
2649 }
2650 
2651 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2652 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2653 			"%llu\n");
2654 
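/*
 * Report cumulative GPU energy in microjoules from the RAPL counter. The
 * Energy Status Unit is bits 12:8 of MSR_RAPL_POWER_UNIT and one counter
 * tick is 2^-ESU joules, so uJ = raw * 10^6 >> ESU. For example, with the
 * common default ESU = 16 one tick works out to 10^6 >> 16 ~= 15 uJ.
 */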
2655 static int i915_energy_uJ(struct seq_file *m, void *data)
2656 {
2657 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2658 	unsigned long long power;
2659 	intel_wakeref_t wakeref;
2660 	u32 units;
2661 
2662 	if (INTEL_GEN(dev_priv) < 6)
2663 		return -ENODEV;
2664 
2665 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2666 		return -ENODEV;
2667 
2668 	units = (power & 0x1f00) >> 8;
2669 	with_intel_runtime_pm(dev_priv, wakeref)
2670 		power = I915_READ(MCH_SECP_NRG_STTS);
2671 
2672 	power = (1000000 * power) >> units; /* convert to uJ */
2673 	seq_printf(m, "%llu", power);
2674 
2675 	return 0;
2676 }
2677 
2678 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2679 {
2680 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2681 	struct pci_dev *pdev = dev_priv->drm.pdev;
2682 
2683 	if (!HAS_RUNTIME_PM(dev_priv))
2684 		seq_puts(m, "Runtime power management not supported\n");
2685 
2686 	seq_printf(m, "Runtime power status: %s\n",
2687 		   enableddisabled(!dev_priv->power_domains.wakeref));
2688 
2689 	seq_printf(m, "GPU idle: %s (epoch %u)\n",
2690 		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2691 	seq_printf(m, "IRQs disabled: %s\n",
2692 		   yesno(!intel_irqs_enabled(dev_priv)));
2693 #ifdef CONFIG_PM
2694 	seq_printf(m, "Usage count: %d\n",
2695 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2696 #else
2697 	seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
2698 #endif
2699 	seq_printf(m, "PCI device power state: %s [%d]\n",
2700 		   pci_power_name(pdev->current_state),
2701 		   pdev->current_state);
2702 
2703 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2704 		struct drm_printer p = drm_seq_file_printer(m);
2705 
2706 		print_intel_runtime_pm_wakeref(dev_priv, &p);
2707 	}
2708 
2709 	return 0;
2710 }
2711 
2712 static int i915_power_domain_info(struct seq_file *m, void *unused)
2713 {
2714 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2715 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2716 	int i;
2717 
2718 	mutex_lock(&power_domains->lock);
2719 
2720 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2721 	for (i = 0; i < power_domains->power_well_count; i++) {
2722 		struct i915_power_well *power_well;
2723 		enum intel_display_power_domain power_domain;
2724 
2725 		power_well = &power_domains->power_wells[i];
2726 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
2727 			   power_well->count);
2728 
2729 		for_each_power_domain(power_domain, power_well->desc->domains)
2730 			seq_printf(m, "  %-23s %d\n",
2731 				 intel_display_power_domain_str(power_domain),
2732 				 power_domains->domain_use_count[power_domain]);
2733 	}
2734 
2735 	mutex_unlock(&power_domains->lock);
2736 
2737 	return 0;
2738 }
2739 
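/*
 * DMC/CSR firmware status: whether the display microcontroller payload is
 * loaded, its version, and the hardware counters of DC3->DC5 and DC5->DC6
 * display power state transitions.
 */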
2740 static int i915_dmc_info(struct seq_file *m, void *unused)
2741 {
2742 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2743 	intel_wakeref_t wakeref;
2744 	struct intel_csr *csr;
2745 
2746 	if (!HAS_CSR(dev_priv))
2747 		return -ENODEV;
2748 
2749 	csr = &dev_priv->csr;
2750 
2751 	wakeref = intel_runtime_pm_get(dev_priv);
2752 
2753 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2754 	seq_printf(m, "path: %s\n", csr->fw_path);
2755 
2756 	if (!csr->dmc_payload)
2757 		goto out;
2758 
2759 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2760 		   CSR_VERSION_MINOR(csr->version));
2761 
2762 	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2763 		goto out;
2764 
2765 	seq_printf(m, "DC3 -> DC5 count: %d\n",
2766 		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2767 						    SKL_CSR_DC3_DC5_COUNT));
2768 	if (!IS_GEN9_LP(dev_priv))
2769 		seq_printf(m, "DC5 -> DC6 count: %d\n",
2770 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2771 
2772 out:
2773 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2774 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2775 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2776 
2777 	intel_runtime_pm_put(dev_priv, wakeref);
2778 
2779 	return 0;
2780 }
2781 
2782 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2783 				 struct drm_display_mode *mode)
2784 {
2785 	int i;
2786 
2787 	for (i = 0; i < tabs; i++)
2788 		seq_putc(m, '\t');
2789 
2790 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2791 }
2792 
2793 static void intel_encoder_info(struct seq_file *m,
2794 			       struct intel_crtc *intel_crtc,
2795 			       struct intel_encoder *intel_encoder)
2796 {
2797 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2798 	struct drm_device *dev = &dev_priv->drm;
2799 	struct drm_crtc *crtc = &intel_crtc->base;
2800 	struct intel_connector *intel_connector;
2801 	struct drm_encoder *encoder;
2802 
2803 	encoder = &intel_encoder->base;
2804 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2805 		   encoder->base.id, encoder->name);
2806 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2807 		struct drm_connector *connector = &intel_connector->base;
2808 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2809 			   connector->base.id,
2810 			   connector->name,
2811 			   drm_get_connector_status_name(connector->status));
2812 		if (connector->status == connector_status_connected) {
2813 			struct drm_display_mode *mode = &crtc->mode;
2814 			seq_puts(m, ", mode:\n");
2815 			intel_seq_print_mode(m, 2, mode);
2816 		} else {
2817 			seq_putc(m, '\n');
2818 		}
2819 	}
2820 }
2821 
2822 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2823 {
2824 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2825 	struct drm_device *dev = &dev_priv->drm;
2826 	struct drm_crtc *crtc = &intel_crtc->base;
2827 	struct intel_encoder *intel_encoder;
2828 	struct drm_plane_state *plane_state = crtc->primary->state;
2829 	struct drm_framebuffer *fb = plane_state->fb;
2830 
2831 	if (fb)
2832 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2833 			   fb->base.id, plane_state->src_x >> 16,
2834 			   plane_state->src_y >> 16, fb->width, fb->height);
2835 	else
2836 		seq_puts(m, "\tprimary plane disabled\n");
2837 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2838 		intel_encoder_info(m, intel_crtc, intel_encoder);
2839 }
2840 
2841 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2842 {
2843 	struct drm_display_mode *mode = panel->fixed_mode;
2844 
2845 	seq_puts(m, "\tfixed mode:\n");
2846 	intel_seq_print_mode(m, 2, mode);
2847 }
2848 
2849 static void intel_dp_info(struct seq_file *m,
2850 			  struct intel_connector *intel_connector)
2851 {
2852 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2853 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2854 
2855 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2856 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2857 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2858 		intel_panel_info(m, &intel_connector->panel);
2859 
2860 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2861 				&intel_dp->aux);
2862 }
2863 
2864 static void intel_dp_mst_info(struct seq_file *m,
2865 			      struct intel_connector *intel_connector)
2866 {
2867 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2868 	struct intel_dp_mst_encoder *intel_mst =
2869 		enc_to_mst(&intel_encoder->base);
2870 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
2871 	struct intel_dp *intel_dp = &intel_dig_port->dp;
2872 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2873 					intel_connector->port);
2874 
2875 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2876 }
2877 
2878 static void intel_hdmi_info(struct seq_file *m,
2879 			    struct intel_connector *intel_connector)
2880 {
2881 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2882 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2883 
2884 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2885 }
2886 
2887 static void intel_lvds_info(struct seq_file *m,
2888 			    struct intel_connector *intel_connector)
2889 {
2890 	intel_panel_info(m, &intel_connector->panel);
2891 }
2892 
2893 static void intel_connector_info(struct seq_file *m,
2894 				 struct drm_connector *connector)
2895 {
2896 	struct intel_connector *intel_connector = to_intel_connector(connector);
2897 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2898 	struct drm_display_mode *mode;
2899 
2900 	seq_printf(m, "connector %d: type %s, status: %s\n",
2901 		   connector->base.id, connector->name,
2902 		   drm_get_connector_status_name(connector->status));
2903 
2904 	if (connector->status == connector_status_disconnected)
2905 		return;
2906 
2907 	seq_printf(m, "\tname: %s\n", connector->display_info.name);
2908 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2909 		   connector->display_info.width_mm,
2910 		   connector->display_info.height_mm);
2911 	seq_printf(m, "\tsubpixel order: %s\n",
2912 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2913 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2914 
2915 	if (!intel_encoder)
2916 		return;
2917 
2918 	switch (connector->connector_type) {
2919 	case DRM_MODE_CONNECTOR_DisplayPort:
2920 	case DRM_MODE_CONNECTOR_eDP:
2921 		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2922 			intel_dp_mst_info(m, intel_connector);
2923 		else
2924 			intel_dp_info(m, intel_connector);
2925 		break;
2926 	case DRM_MODE_CONNECTOR_LVDS:
2927 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2928 			intel_lvds_info(m, intel_connector);
2929 		break;
2930 	case DRM_MODE_CONNECTOR_HDMIA:
2931 		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2932 		    intel_encoder->type == INTEL_OUTPUT_DDI)
2933 			intel_hdmi_info(m, intel_connector);
2934 		break;
2935 	default:
2936 		break;
2937 	}
2938 
2939 	seq_puts(m, "\tmodes:\n");
2940 	list_for_each_entry(mode, &connector->modes, head)
2941 		intel_seq_print_mode(m, 2, mode);
2942 }
2943 
2944 static const char *plane_type(enum drm_plane_type type)
2945 {
2946 	switch (type) {
2947 	case DRM_PLANE_TYPE_OVERLAY:
2948 		return "OVL";
2949 	case DRM_PLANE_TYPE_PRIMARY:
2950 		return "PRI";
2951 	case DRM_PLANE_TYPE_CURSOR:
2952 		return "CUR";
2953 	/*
2954 	 * Deliberately omitting default: to generate compiler warnings
2955 	 * when a new drm_plane_type gets added.
2956 	 */
2957 	}
2958 
2959 	return "unknown";
2960 }
2961 
2962 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2963 {
2964 	/*
2965 	 * According to the docs only one DRM_MODE_ROTATE_ value may be set
2966 	 * at a time, but print them all so any misuse of the field is visible.
2967 	 */
2968 	snprintf(buf, bufsize,
2969 		 "%s%s%s%s%s%s(0x%08x)",
2970 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2971 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2972 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2973 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2974 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2975 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2976 		 rotation);
2977 }
2978 
2979 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2980 {
2981 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2982 	struct drm_device *dev = &dev_priv->drm;
2983 	struct intel_plane *intel_plane;
2984 
2985 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2986 		struct drm_plane_state *state;
2987 		struct drm_plane *plane = &intel_plane->base;
2988 		struct drm_format_name_buf format_name;
2989 		char rot_str[48];
2990 
2991 		if (!plane->state) {
2992 			seq_puts(m, "plane->state is NULL!\n");
2993 			continue;
2994 		}
2995 
2996 		state = plane->state;
2997 
2998 		if (state->fb) {
2999 			drm_get_format_name(state->fb->format->format,
3000 					    &format_name);
3001 		} else {
3002 			sprintf(format_name.str, "N/A");
3003 		}
3004 
3005 		plane_rotation(rot_str, sizeof(rot_str), state->rotation);
3006 
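		/*
		 * src_* coordinates are 16.16 fixed point; print the
		 * fractional part in millionths, using
		 * frac * 15625 >> 10 == frac * 10^6 / 2^16.
		 */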
3007 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3008 			   plane->base.id,
3009 			   plane_type(intel_plane->base.type),
3010 			   state->crtc_x, state->crtc_y,
3011 			   state->crtc_w, state->crtc_h,
3012 			   (state->src_x >> 16),
3013 			   ((state->src_x & 0xffff) * 15625) >> 10,
3014 			   (state->src_y >> 16),
3015 			   ((state->src_y & 0xffff) * 15625) >> 10,
3016 			   (state->src_w >> 16),
3017 			   ((state->src_w & 0xffff) * 15625) >> 10,
3018 			   (state->src_h >> 16),
3019 			   ((state->src_h & 0xffff) * 15625) >> 10,
3020 			   format_name.str,
3021 			   rot_str);
3022 	}
3023 }
3024 
3025 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3026 {
3027 	struct intel_crtc_state *pipe_config;
3028 	int num_scalers = intel_crtc->num_scalers;
3029 	int i;
3030 
3031 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3032 
3033 	/* Not all platforms have a scaler */
3034 	if (num_scalers) {
3035 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3036 			   num_scalers,
3037 			   pipe_config->scaler_state.scaler_users,
3038 			   pipe_config->scaler_state.scaler_id);
3039 
3040 		for (i = 0; i < num_scalers; i++) {
3041 			struct intel_scaler *sc =
3042 					&pipe_config->scaler_state.scalers[i];
3043 
3044 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3045 				   i, yesno(sc->in_use), sc->mode);
3046 		}
3047 		seq_puts(m, "\n");
3048 	} else {
3049 		seq_puts(m, "\tNo scalers available on this platform\n");
3050 	}
3051 }
3052 
3053 static int i915_display_info(struct seq_file *m, void *unused)
3054 {
3055 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3056 	struct drm_device *dev = &dev_priv->drm;
3057 	struct intel_crtc *crtc;
3058 	struct drm_connector *connector;
3059 	struct drm_connector_list_iter conn_iter;
3060 	intel_wakeref_t wakeref;
3061 
3062 	wakeref = intel_runtime_pm_get(dev_priv);
3063 
3064 	seq_puts(m, "CRTC info\n");
3065 	seq_puts(m, "---------\n");
3066 	for_each_intel_crtc(dev, crtc) {
3067 		struct intel_crtc_state *pipe_config;
3068 
3069 		drm_modeset_lock(&crtc->base.mutex, NULL);
3070 		pipe_config = to_intel_crtc_state(crtc->base.state);
3071 
3072 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3073 			   crtc->base.base.id, pipe_name(crtc->pipe),
3074 			   yesno(pipe_config->base.active),
3075 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3076 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3077 
3078 		if (pipe_config->base.active) {
3079 			struct intel_plane *cursor =
3080 				to_intel_plane(crtc->base.cursor);
3081 
3082 			intel_crtc_info(m, crtc);
3083 
3084 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3085 				   yesno(cursor->base.state->visible),
3086 				   cursor->base.state->crtc_x,
3087 				   cursor->base.state->crtc_y,
3088 				   cursor->base.state->crtc_w,
3089 				   cursor->base.state->crtc_h,
3090 				   cursor->cursor.base);
3091 			intel_scaler_info(m, crtc);
3092 			intel_plane_info(m, crtc);
3093 		}
3094 
3095 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
3096 			   yesno(!crtc->cpu_fifo_underrun_disabled),
3097 			   yesno(!crtc->pch_fifo_underrun_disabled));
3098 		drm_modeset_unlock(&crtc->base.mutex);
3099 	}
3100 
3101 	seq_putc(m, '\n');
3102 	seq_puts(m, "Connector info\n");
3103 	seq_puts(m, "--------------\n");
3104 	mutex_lock(&dev->mode_config.mutex);
3105 	drm_connector_list_iter_begin(dev, &conn_iter);
3106 	drm_for_each_connector_iter(connector, &conn_iter)
3107 		intel_connector_info(m, connector);
3108 	drm_connector_list_iter_end(&conn_iter);
3109 	mutex_unlock(&dev->mode_config.mutex);
3110 
3111 	intel_runtime_pm_put(dev_priv, wakeref);
3112 
3113 	return 0;
3114 }
3115 
3116 static int i915_engine_info(struct seq_file *m, void *unused)
3117 {
3118 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3119 	struct intel_engine_cs *engine;
3120 	intel_wakeref_t wakeref;
3121 	enum intel_engine_id id;
3122 	struct drm_printer p;
3123 
3124 	wakeref = intel_runtime_pm_get(dev_priv);
3125 
3126 	seq_printf(m, "GT awake? %s (epoch %u)\n",
3127 		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3128 	seq_printf(m, "Global active requests: %d\n",
3129 		   dev_priv->gt.active_requests);
3130 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
3131 		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
3132 
3133 	p = drm_seq_file_printer(m);
3134 	for_each_engine(engine, dev_priv, id)
3135 		intel_engine_dump(engine, &p, "%s\n", engine->name);
3136 
3137 	intel_runtime_pm_put(dev_priv, wakeref);
3138 
3139 	return 0;
3140 }
3141 
3142 static int i915_rcs_topology(struct seq_file *m, void *unused)
3143 {
3144 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3145 	struct drm_printer p = drm_seq_file_printer(m);
3146 
3147 	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
3148 
3149 	return 0;
3150 }
3151 
3152 static int i915_shrinker_info(struct seq_file *m, void *unused)
3153 {
3154 	struct drm_i915_private *i915 = node_to_i915(m->private);
3155 
3156 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3157 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3158 
3159 	return 0;
3160 }
3161 
3162 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3163 {
3164 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3165 	struct drm_device *dev = &dev_priv->drm;
3166 	int i;
3167 
3168 	drm_modeset_lock_all(dev);
3169 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3170 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3171 
3172 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3173 			   pll->info->id);
3174 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3175 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3176 		seq_puts(m, " tracked hardware state:\n");
3177 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3178 		seq_printf(m, " dpll_md: 0x%08x\n",
3179 			   pll->state.hw_state.dpll_md);
3180 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3181 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3182 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3183 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3184 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3185 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3186 			   pll->state.hw_state.mg_refclkin_ctl);
3187 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3188 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
3189 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3190 			   pll->state.hw_state.mg_clktop2_hsclkctl);
3191 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
3192 			   pll->state.hw_state.mg_pll_div0);
3193 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
3194 			   pll->state.hw_state.mg_pll_div1);
3195 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
3196 			   pll->state.hw_state.mg_pll_lf);
3197 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3198 			   pll->state.hw_state.mg_pll_frac_lock);
3199 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3200 			   pll->state.hw_state.mg_pll_ssc);
3201 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
3202 			   pll->state.hw_state.mg_pll_bias);
3203 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3204 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
3205 	}
3206 	drm_modeset_unlock_all(dev);
3207 
3208 	return 0;
3209 }
3210 
3211 static int i915_wa_registers(struct seq_file *m, void *unused)
3212 {
3213 	struct drm_i915_private *i915 = node_to_i915(m->private);
3214 	const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3215 	struct i915_wa *wa;
3216 	unsigned int i;
3217 
3218 	seq_printf(m, "Workarounds applied: %u\n", wal->count);
3219 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
3220 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3221 			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
3222 
3223 	return 0;
3224 }
3225 
3226 static int i915_ipc_status_show(struct seq_file *m, void *data)
3227 {
3228 	struct drm_i915_private *dev_priv = m->private;
3229 
3230 	seq_printf(m, "Isochronous Priority Control: %s\n",
3231 		   yesno(dev_priv->ipc_enabled));
3232 	return 0;
3233 }
3234 
3235 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3236 {
3237 	struct drm_i915_private *dev_priv = inode->i_private;
3238 
3239 	if (!HAS_IPC(dev_priv))
3240 		return -ENODEV;
3241 
3242 	return single_open(file, i915_ipc_status_show, dev_priv);
3243 }
3244 
3245 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3246 				     size_t len, loff_t *offp)
3247 {
3248 	struct seq_file *m = file->private_data;
3249 	struct drm_i915_private *dev_priv = m->private;
3250 	intel_wakeref_t wakeref;
3251 	bool enable;
3252 	int ret;
3253 
3254 	ret = kstrtobool_from_user(ubuf, len, &enable);
3255 	if (ret < 0)
3256 		return ret;
3257 
3258 	with_intel_runtime_pm(dev_priv, wakeref) {
3259 		if (!dev_priv->ipc_enabled && enable)
3260 			DRM_INFO("Enabling IPC: watermarks will only be correct after the next commit\n");
3261 		dev_priv->wm.distrust_bios_wm = true;
3262 		dev_priv->ipc_enabled = enable;
3263 		intel_enable_ipc(dev_priv);
3264 	}
3265 
3266 	return len;
3267 }
3268 
3269 static const struct file_operations i915_ipc_status_fops = {
3270 	.owner = THIS_MODULE,
3271 	.open = i915_ipc_status_open,
3272 	.read = seq_read,
3273 	.llseek = seq_lseek,
3274 	.release = single_release,
3275 	.write = i915_ipc_status_write
3276 };
3277 
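/*
 * Dump the gen9+ Display Data Buffer (DDB) allocation: the start/end block
 * and size of the slice assigned to each plane (and the cursor) on every
 * pipe.
 */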
3278 static int i915_ddb_info(struct seq_file *m, void *unused)
3279 {
3280 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3281 	struct drm_device *dev = &dev_priv->drm;
3282 	struct skl_ddb_entry *entry;
3283 	struct intel_crtc *crtc;
3284 
3285 	if (INTEL_GEN(dev_priv) < 9)
3286 		return -ENODEV;
3287 
3288 	drm_modeset_lock_all(dev);
3289 
3290 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3291 
3292 	for_each_intel_crtc(&dev_priv->drm, crtc) {
3293 		struct intel_crtc_state *crtc_state =
3294 			to_intel_crtc_state(crtc->base.state);
3295 		enum pipe pipe = crtc->pipe;
3296 		enum plane_id plane_id;
3297 
3298 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3299 
3300 		for_each_plane_id_on_crtc(crtc, plane_id) {
3301 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3302 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
3303 				   entry->start, entry->end,
3304 				   skl_ddb_entry_size(entry));
3305 		}
3306 
3307 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
3308 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3309 			   entry->end, skl_ddb_entry_size(entry));
3310 	}
3311 
3312 	drm_modeset_unlock_all(dev);
3313 
3314 	return 0;
3315 }
3316 
3317 static void drrs_status_per_crtc(struct seq_file *m,
3318 				 struct drm_device *dev,
3319 				 struct intel_crtc *intel_crtc)
3320 {
3321 	struct drm_i915_private *dev_priv = to_i915(dev);
3322 	struct i915_drrs *drrs = &dev_priv->drrs;
3323 	int vrefresh = 0;
3324 	struct drm_connector *connector;
3325 	struct drm_connector_list_iter conn_iter;
3326 
3327 	drm_connector_list_iter_begin(dev, &conn_iter);
3328 	drm_for_each_connector_iter(connector, &conn_iter) {
3329 		if (connector->state->crtc != &intel_crtc->base)
3330 			continue;
3331 
3332 		seq_printf(m, "%s:\n", connector->name);
3333 	}
3334 	drm_connector_list_iter_end(&conn_iter);
3335 
3336 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3337 		seq_puts(m, "\tVBT: DRRS_type: Static");
3338 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3339 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3340 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3341 		seq_puts(m, "\tVBT: DRRS_type: None");
3342 	else
3343 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3344 
3345 	seq_puts(m, "\n\n");
3346 
3347 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3348 		struct intel_panel *panel;
3349 
3350 		mutex_lock(&drrs->mutex);
3351 		/* DRRS Supported */
3352 		seq_puts(m, "\tDRRS Supported: Yes\n");
3353 
3354 		/* disable_drrs() will make drrs->dp NULL */
3355 		if (!drrs->dp) {
3356 			seq_puts(m, "Idleness DRRS: Disabled\n");
3357 			if (dev_priv->psr.enabled)
3358 				seq_puts(m,
3359 				"\tAs PSR is enabled, DRRS is not enabled\n");
3360 			mutex_unlock(&drrs->mutex);
3361 			return;
3362 		}
3363 
3364 		panel = &drrs->dp->attached_connector->panel;
3365 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3366 			   drrs->busy_frontbuffer_bits);
3367 
3368 		seq_puts(m, "\n\t\t");
3369 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3370 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3371 			vrefresh = panel->fixed_mode->vrefresh;
3372 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3373 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3374 			vrefresh = panel->downclock_mode->vrefresh;
3375 		} else {
3376 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3377 				   drrs->refresh_rate_type);
3378 			mutex_unlock(&drrs->mutex);
3379 			return;
3380 		}
3381 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3382 
3383 		seq_puts(m, "\n\t\t");
3384 		mutex_unlock(&drrs->mutex);
3385 	} else {
3386 		/* DRRS not supported. Print the VBT parameter */
3387 		seq_puts(m, "\tDRRS Supported: No");
3388 	}
3389 	seq_puts(m, "\n");
3390 }
3391 
3392 static int i915_drrs_status(struct seq_file *m, void *unused)
3393 {
3394 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3395 	struct drm_device *dev = &dev_priv->drm;
3396 	struct intel_crtc *intel_crtc;
3397 	int active_crtc_cnt = 0;
3398 
3399 	drm_modeset_lock_all(dev);
3400 	for_each_intel_crtc(dev, intel_crtc) {
3401 		if (intel_crtc->base.state->active) {
3402 			active_crtc_cnt++;
3403 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3404 
3405 			drrs_status_per_crtc(m, dev, intel_crtc);
3406 		}
3407 	}
3408 	drm_modeset_unlock_all(dev);
3409 
3410 	if (!active_crtc_cnt)
3411 		seq_puts(m, "No active crtc found\n");
3412 
3413 	return 0;
3414 }
3415 
3416 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3417 {
3418 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3419 	struct drm_device *dev = &dev_priv->drm;
3420 	struct intel_encoder *intel_encoder;
3421 	struct intel_digital_port *intel_dig_port;
3422 	struct drm_connector *connector;
3423 	struct drm_connector_list_iter conn_iter;
3424 
3425 	drm_connector_list_iter_begin(dev, &conn_iter);
3426 	drm_for_each_connector_iter(connector, &conn_iter) {
3427 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3428 			continue;
3429 
3430 		intel_encoder = intel_attached_encoder(connector);
3431 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3432 			continue;
3433 
3434 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3435 		if (!intel_dig_port->dp.can_mst)
3436 			continue;
3437 
3438 		seq_printf(m, "MST Source Port %c\n",
3439 			   port_name(intel_dig_port->base.port));
3440 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3441 	}
3442 	drm_connector_list_iter_end(&conn_iter);
3443 
3444 	return 0;
3445 }
3446 
3447 static ssize_t i915_displayport_test_active_write(struct file *file,
3448 						  const char __user *ubuf,
3449 						  size_t len, loff_t *offp)
3450 {
3451 	char *input_buffer;
3452 	int status = 0;
3453 	struct drm_device *dev;
3454 	struct drm_connector *connector;
3455 	struct drm_connector_list_iter conn_iter;
3456 	struct intel_dp *intel_dp;
3457 	int val = 0;
3458 
3459 	dev = ((struct seq_file *)file->private_data)->private;
3460 
3461 	if (len == 0)
3462 		return 0;
3463 
3464 	input_buffer = memdup_user_nul(ubuf, len);
3465 	if (IS_ERR(input_buffer))
3466 		return PTR_ERR(input_buffer);
3467 
3468 	DRM_DEBUG_DRIVER("Copied %u bytes from user\n", (unsigned int)len);
3469 
3470 	drm_connector_list_iter_begin(dev, &conn_iter);
3471 	drm_for_each_connector_iter(connector, &conn_iter) {
3472 		struct intel_encoder *encoder;
3473 
3474 		if (connector->connector_type !=
3475 		    DRM_MODE_CONNECTOR_DisplayPort)
3476 			continue;
3477 
3478 		encoder = to_intel_encoder(connector->encoder);
3479 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3480 			continue;
3481 
3482 		if (encoder && connector->status == connector_status_connected) {
3483 			intel_dp = enc_to_intel_dp(&encoder->base);
3484 			status = kstrtoint(input_buffer, 10, &val);
3485 			if (status < 0)
3486 				break;
3487 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3488 			/* To prevent erroneous activation of the compliance
3489 			 * testing code, only accept an actual value of 1 here.
3490 			 */
3491 			if (val == 1)
3492 				intel_dp->compliance.test_active = 1;
3493 			else
3494 				intel_dp->compliance.test_active = 0;
3495 		}
3496 	}
3497 	drm_connector_list_iter_end(&conn_iter);
3498 	kfree(input_buffer);
3499 	if (status < 0)
3500 		return status;
3501 
3502 	*offp += len;
3503 	return len;
3504 }
3505 
3506 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3507 {
3508 	struct drm_i915_private *dev_priv = m->private;
3509 	struct drm_device *dev = &dev_priv->drm;
3510 	struct drm_connector *connector;
3511 	struct drm_connector_list_iter conn_iter;
3512 	struct intel_dp *intel_dp;
3513 
3514 	drm_connector_list_iter_begin(dev, &conn_iter);
3515 	drm_for_each_connector_iter(connector, &conn_iter) {
3516 		struct intel_encoder *encoder;
3517 
3518 		if (connector->connector_type !=
3519 		    DRM_MODE_CONNECTOR_DisplayPort)
3520 			continue;
3521 
3522 		encoder = to_intel_encoder(connector->encoder);
3523 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3524 			continue;
3525 
3526 		if (encoder && connector->status == connector_status_connected) {
3527 			intel_dp = enc_to_intel_dp(&encoder->base);
3528 			if (intel_dp->compliance.test_active)
3529 				seq_puts(m, "1");
3530 			else
3531 				seq_puts(m, "0");
3532 		} else
3533 			seq_puts(m, "0");
3534 	}
3535 	drm_connector_list_iter_end(&conn_iter);
3536 
3537 	return 0;
3538 }
3539 
3540 static int i915_displayport_test_active_open(struct inode *inode,
3541 					     struct file *file)
3542 {
3543 	return single_open(file, i915_displayport_test_active_show,
3544 			   inode->i_private);
3545 }
3546 
3547 static const struct file_operations i915_displayport_test_active_fops = {
3548 	.owner = THIS_MODULE,
3549 	.open = i915_displayport_test_active_open,
3550 	.read = seq_read,
3551 	.llseek = seq_lseek,
3552 	.release = single_release,
3553 	.write = i915_displayport_test_active_write
3554 };
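
/*
 * Editor's note -- example usage, a sketch assuming the default debugfs
 * mount at /sys/kernel/debug and DRM minor 0 (the file is registered
 * further down as "i915_dp_test_active"):
 *
 *   # arm the DP compliance test handler; only the literal value 1 arms it
 *   echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 *   # read back per-connector state (prints 1 or 0)
 *   cat /sys/kernel/debug/dri/0/i915_dp_test_active
 */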
3555 
3556 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3557 {
3558 	struct drm_i915_private *dev_priv = m->private;
3559 	struct drm_device *dev = &dev_priv->drm;
3560 	struct drm_connector *connector;
3561 	struct drm_connector_list_iter conn_iter;
3562 	struct intel_dp *intel_dp;
3563 
3564 	drm_connector_list_iter_begin(dev, &conn_iter);
3565 	drm_for_each_connector_iter(connector, &conn_iter) {
3566 		struct intel_encoder *encoder;
3567 
3568 		if (connector->connector_type !=
3569 		    DRM_MODE_CONNECTOR_DisplayPort)
3570 			continue;
3571 
3572 		encoder = to_intel_encoder(connector->encoder);
3573 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3574 			continue;
3575 
3576 		if (encoder && connector->status == connector_status_connected) {
3577 			intel_dp = enc_to_intel_dp(&encoder->base);
3578 			if (intel_dp->compliance.test_type ==
3579 			    DP_TEST_LINK_EDID_READ)
3580 				seq_printf(m, "%lx",
3581 					   intel_dp->compliance.test_data.edid);
3582 			else if (intel_dp->compliance.test_type ==
3583 				 DP_TEST_LINK_VIDEO_PATTERN) {
3584 				seq_printf(m, "hdisplay: %d\n",
3585 					   intel_dp->compliance.test_data.hdisplay);
3586 				seq_printf(m, "vdisplay: %d\n",
3587 					   intel_dp->compliance.test_data.vdisplay);
3588 				seq_printf(m, "bpc: %u\n",
3589 					   intel_dp->compliance.test_data.bpc);
3590 			}
3591 		} else
3592 			seq_puts(m, "0");
3593 	}
3594 	drm_connector_list_iter_end(&conn_iter);
3595 
3596 	return 0;
3597 }
3598 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3599 
3600 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3601 {
3602 	struct drm_i915_private *dev_priv = m->private;
3603 	struct drm_device *dev = &dev_priv->drm;
3604 	struct drm_connector *connector;
3605 	struct drm_connector_list_iter conn_iter;
3606 	struct intel_dp *intel_dp;
3607 
3608 	drm_connector_list_iter_begin(dev, &conn_iter);
3609 	drm_for_each_connector_iter(connector, &conn_iter) {
3610 		struct intel_encoder *encoder;
3611 
3612 		if (connector->connector_type !=
3613 		    DRM_MODE_CONNECTOR_DisplayPort)
3614 			continue;
3615 
3616 		encoder = to_intel_encoder(connector->encoder);
3617 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3618 			continue;
3619 
3620 		if (encoder && connector->status == connector_status_connected) {
3621 			intel_dp = enc_to_intel_dp(&encoder->base);
3622 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3623 		} else
3624 			seq_puts(m, "0");
3625 	}
3626 	drm_connector_list_iter_end(&conn_iter);
3627 
3628 	return 0;
3629 }
3630 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3631 
3632 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3633 {
3634 	struct drm_i915_private *dev_priv = m->private;
3635 	struct drm_device *dev = &dev_priv->drm;
3636 	int level;
3637 	int num_levels;
3638 
3639 	if (IS_CHERRYVIEW(dev_priv))
3640 		num_levels = 3;
3641 	else if (IS_VALLEYVIEW(dev_priv))
3642 		num_levels = 1;
3643 	else if (IS_G4X(dev_priv))
3644 		num_levels = 3;
3645 	else
3646 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3647 
3648 	drm_modeset_lock_all(dev);
3649 
3650 	for (level = 0; level < num_levels; level++) {
3651 		unsigned int latency = wm[level];
3652 
3653 		/*
3654 		 * - WM1+ latency values in 0.5us units
3655 		 * - latencies are in us on gen9/vlv/chv/g4x
3656 		 */
3657 		if (INTEL_GEN(dev_priv) >= 9 ||
3658 		    IS_VALLEYVIEW(dev_priv) ||
3659 		    IS_CHERRYVIEW(dev_priv) ||
3660 		    IS_G4X(dev_priv))
3661 			latency *= 10;
3662 		else if (level > 0)
3663 			latency *= 5;
3664 
3665 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3666 			   level, wm[level], latency / 10, latency % 10);
3667 	}
3668 
3669 	drm_modeset_unlock_all(dev);
3670 }
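
/*
 * Editor's note -- worked example of the scaling above: on ILK-class
 * hardware a raw WM1 value of 4 is in 0.5us units, so latency becomes
 * 4 * 5 = 20 and the line prints as "WM1 4 (2.0 usec)"; on gen9 the
 * same raw value is already in us and prints as "WM1 4 (4.0 usec)".
 */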
3671 
3672 static int pri_wm_latency_show(struct seq_file *m, void *data)
3673 {
3674 	struct drm_i915_private *dev_priv = m->private;
3675 	const u16 *latencies;
3676 
3677 	if (INTEL_GEN(dev_priv) >= 9)
3678 		latencies = dev_priv->wm.skl_latency;
3679 	else
3680 		latencies = dev_priv->wm.pri_latency;
3681 
3682 	wm_latency_show(m, latencies);
3683 
3684 	return 0;
3685 }
3686 
3687 static int spr_wm_latency_show(struct seq_file *m, void *data)
3688 {
3689 	struct drm_i915_private *dev_priv = m->private;
3690 	const u16 *latencies;
3691 
3692 	if (INTEL_GEN(dev_priv) >= 9)
3693 		latencies = dev_priv->wm.skl_latency;
3694 	else
3695 		latencies = dev_priv->wm.spr_latency;
3696 
3697 	wm_latency_show(m, latencies);
3698 
3699 	return 0;
3700 }
3701 
3702 static int cur_wm_latency_show(struct seq_file *m, void *data)
3703 {
3704 	struct drm_i915_private *dev_priv = m->private;
3705 	const u16 *latencies;
3706 
3707 	if (INTEL_GEN(dev_priv) >= 9)
3708 		latencies = dev_priv->wm.skl_latency;
3709 	else
3710 		latencies = dev_priv->wm.cur_latency;
3711 
3712 	wm_latency_show(m, latencies);
3713 
3714 	return 0;
3715 }
3716 
3717 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3718 {
3719 	struct drm_i915_private *dev_priv = inode->i_private;
3720 
3721 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3722 		return -ENODEV;
3723 
3724 	return single_open(file, pri_wm_latency_show, dev_priv);
3725 }
3726 
3727 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3728 {
3729 	struct drm_i915_private *dev_priv = inode->i_private;
3730 
3731 	if (HAS_GMCH(dev_priv))
3732 		return -ENODEV;
3733 
3734 	return single_open(file, spr_wm_latency_show, dev_priv);
3735 }
3736 
3737 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3738 {
3739 	struct drm_i915_private *dev_priv = inode->i_private;
3740 
3741 	if (HAS_GMCH(dev_priv))
3742 		return -ENODEV;
3743 
3744 	return single_open(file, cur_wm_latency_show, dev_priv);
3745 }
3746 
3747 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3748 				size_t len, loff_t *offp, u16 wm[8])
3749 {
3750 	struct seq_file *m = file->private_data;
3751 	struct drm_i915_private *dev_priv = m->private;
3752 	struct drm_device *dev = &dev_priv->drm;
3753 	u16 new[8] = { 0 };
3754 	int num_levels;
3755 	int level;
3756 	int ret;
3757 	char tmp[32];
3758 
3759 	if (IS_CHERRYVIEW(dev_priv))
3760 		num_levels = 3;
3761 	else if (IS_VALLEYVIEW(dev_priv))
3762 		num_levels = 1;
3763 	else if (IS_G4X(dev_priv))
3764 		num_levels = 3;
3765 	else
3766 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3767 
3768 	if (len >= sizeof(tmp))
3769 		return -EINVAL;
3770 
3771 	if (copy_from_user(tmp, ubuf, len))
3772 		return -EFAULT;
3773 
3774 	tmp[len] = '\0';
3775 
3776 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3777 		     &new[0], &new[1], &new[2], &new[3],
3778 		     &new[4], &new[5], &new[6], &new[7]);
3779 	if (ret != num_levels)
3780 		return -EINVAL;
3781 
3782 	drm_modeset_lock_all(dev);
3783 
3784 	for (level = 0; level < num_levels; level++)
3785 		wm[level] = new[level];
3786 
3787 	drm_modeset_unlock_all(dev);
3788 
3789 	return len;
3790 }
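
/*
 * Editor's note -- example write, a sketch assuming a part with five
 * watermark levels (e.g. HSW/BDW) and arbitrary latency values; the
 * sscanf() above must match exactly num_levels values, otherwise the
 * write fails with -EINVAL:
 *
 *   echo "7 4 12 34 65" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */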
3791 
3793 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3794 				    size_t len, loff_t *offp)
3795 {
3796 	struct seq_file *m = file->private_data;
3797 	struct drm_i915_private *dev_priv = m->private;
3798 	u16 *latencies;
3799 
3800 	if (INTEL_GEN(dev_priv) >= 9)
3801 		latencies = dev_priv->wm.skl_latency;
3802 	else
3803 		latencies = dev_priv->wm.pri_latency;
3804 
3805 	return wm_latency_write(file, ubuf, len, offp, latencies);
3806 }
3807 
3808 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3809 				    size_t len, loff_t *offp)
3810 {
3811 	struct seq_file *m = file->private_data;
3812 	struct drm_i915_private *dev_priv = m->private;
3813 	u16 *latencies;
3814 
3815 	if (INTEL_GEN(dev_priv) >= 9)
3816 		latencies = dev_priv->wm.skl_latency;
3817 	else
3818 		latencies = dev_priv->wm.spr_latency;
3819 
3820 	return wm_latency_write(file, ubuf, len, offp, latencies);
3821 }
3822 
3823 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3824 				    size_t len, loff_t *offp)
3825 {
3826 	struct seq_file *m = file->private_data;
3827 	struct drm_i915_private *dev_priv = m->private;
3828 	u16 *latencies;
3829 
3830 	if (INTEL_GEN(dev_priv) >= 9)
3831 		latencies = dev_priv->wm.skl_latency;
3832 	else
3833 		latencies = dev_priv->wm.cur_latency;
3834 
3835 	return wm_latency_write(file, ubuf, len, offp, latencies);
3836 }
3837 
3838 static const struct file_operations i915_pri_wm_latency_fops = {
3839 	.owner = THIS_MODULE,
3840 	.open = pri_wm_latency_open,
3841 	.read = seq_read,
3842 	.llseek = seq_lseek,
3843 	.release = single_release,
3844 	.write = pri_wm_latency_write
3845 };
3846 
3847 static const struct file_operations i915_spr_wm_latency_fops = {
3848 	.owner = THIS_MODULE,
3849 	.open = spr_wm_latency_open,
3850 	.read = seq_read,
3851 	.llseek = seq_lseek,
3852 	.release = single_release,
3853 	.write = spr_wm_latency_write
3854 };
3855 
3856 static const struct file_operations i915_cur_wm_latency_fops = {
3857 	.owner = THIS_MODULE,
3858 	.open = cur_wm_latency_open,
3859 	.read = seq_read,
3860 	.llseek = seq_lseek,
3861 	.release = single_release,
3862 	.write = cur_wm_latency_write
3863 };
3864 
3865 static int
3866 i915_wedged_get(void *data, u64 *val)
3867 {
3868 	struct drm_i915_private *dev_priv = data;
3869 
3870 	*val = i915_terminally_wedged(&dev_priv->gpu_error);
3871 
3872 	return 0;
3873 }
3874 
3875 static int
3876 i915_wedged_set(void *data, u64 val)
3877 {
3878 	struct drm_i915_private *i915 = data;
3879 
3880 	/*
3881 	 * There is no safeguard against this debugfs entry colliding
3882 	 * with the hangcheck calling the same i915_handle_error() in
3883 	 * parallel, causing an explosion. For now we assume that the
3884 	 * test harness is responsible enough not to inject gpu hangs
3885 	 * while it is writing to 'i915_wedged'.
3886 	 */
3887 
3888 	if (i915_reset_backoff(&i915->gpu_error))
3889 		return -EAGAIN;
3890 
3891 	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
3892 			  "Manually set wedged engine mask = %llx", val);
3893 	return 0;
3894 }
3895 
3896 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3897 			i915_wedged_get, i915_wedged_set,
3898 			"%llu\n");
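
/*
 * Editor's note -- example usage (hypothetical engine mask; assumes
 * debugfs at /sys/kernel/debug, DRM minor 0):
 *
 *   # declare all engines hung; the value is forwarded to
 *   # i915_handle_error() as the engine mask
 *   echo 0xffffffff > /sys/kernel/debug/dri/0/i915_wedged
 *   # reads back nonzero once the GPU is terminally wedged
 *   cat /sys/kernel/debug/dri/0/i915_wedged
 */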
3899 
3900 #define DROP_UNBOUND	BIT(0)
3901 #define DROP_BOUND	BIT(1)
3902 #define DROP_RETIRE	BIT(2)
3903 #define DROP_ACTIVE	BIT(3)
3904 #define DROP_FREED	BIT(4)
3905 #define DROP_SHRINK_ALL	BIT(5)
3906 #define DROP_IDLE	BIT(6)
3907 #define DROP_RESET_ACTIVE	BIT(7)
3908 #define DROP_RESET_SEQNO	BIT(8)
3909 #define DROP_ALL (DROP_UNBOUND	| \
3910 		  DROP_BOUND	| \
3911 		  DROP_RETIRE	| \
3912 		  DROP_ACTIVE	| \
3913 		  DROP_FREED	| \
3914 		  DROP_SHRINK_ALL | \
3915 		  DROP_IDLE	| \
3916 		  DROP_RESET_ACTIVE | \
3917 		  DROP_RESET_SEQNO)
3918 static int
3919 i915_drop_caches_get(void *data, u64 *val)
3920 {
3921 	*val = DROP_ALL;
3922 
3923 	return 0;
3924 }
3925 
3926 static int
3927 i915_drop_caches_set(void *data, u64 val)
3928 {
3929 	struct drm_i915_private *i915 = data;
3930 	intel_wakeref_t wakeref;
3931 	int ret = 0;
3932 
3933 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3934 		  val, val & DROP_ALL);
3935 	wakeref = intel_runtime_pm_get(i915);
3936 
3937 	if (val & DROP_RESET_ACTIVE &&
3938 	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
3939 		i915_gem_set_wedged(i915);
3940 
3941 	/* No need to check and wait for gpu resets; only libdrm
3942 	 * auto-restarts ioctls that return -EAGAIN. */
3943 	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
3944 		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
3945 		if (ret)
3946 			goto out;
3947 
3948 		if (val & DROP_ACTIVE)
3949 			ret = i915_gem_wait_for_idle(i915,
3950 						     I915_WAIT_INTERRUPTIBLE |
3951 						     I915_WAIT_LOCKED,
3952 						     MAX_SCHEDULE_TIMEOUT);
3953 
3954 		if (val & DROP_RETIRE)
3955 			i915_retire_requests(i915);
3956 
3957 		mutex_unlock(&i915->drm.struct_mutex);
3958 	}
3959 
3960 	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
3961 		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
3962 
3963 	fs_reclaim_acquire(GFP_KERNEL);
3964 	if (val & DROP_BOUND)
3965 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
3966 
3967 	if (val & DROP_UNBOUND)
3968 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
3969 
3970 	if (val & DROP_SHRINK_ALL)
3971 		i915_gem_shrink_all(i915);
3972 	fs_reclaim_release(GFP_KERNEL);
3973 
3974 	if (val & DROP_IDLE) {
3975 		do {
3976 			if (READ_ONCE(i915->gt.active_requests))
3977 				flush_delayed_work(&i915->gt.retire_work);
3978 			drain_delayed_work(&i915->gt.idle_work);
3979 		} while (READ_ONCE(i915->gt.awake));
3980 	}
3981 
3982 	if (val & DROP_FREED)
3983 		i915_gem_drain_freed_objects(i915);
3984 
3985 out:
3986 	intel_runtime_pm_put(i915, wakeref);
3987 
3988 	return ret;
3989 }
3990 
3991 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3992 			i915_drop_caches_get, i915_drop_caches_set,
3993 			"0x%08llx\n");
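
/*
 * Editor's note -- example usage with the DROP_* bits defined above
 * (0x7 = DROP_UNBOUND | DROP_BOUND | DROP_RETIRE):
 *
 *   echo 0x7 > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *   # reading reports the full supported mask, i.e. DROP_ALL
 *   cat /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */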
3994 
3995 static int
3996 i915_cache_sharing_get(void *data, u64 *val)
3997 {
3998 	struct drm_i915_private *dev_priv = data;
3999 	intel_wakeref_t wakeref;
4000 	u32 snpcr = 0;
4001 
4002 	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
4003 		return -ENODEV;
4004 
4005 	with_intel_runtime_pm(dev_priv, wakeref)
4006 		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4007 
4008 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4009 
4010 	return 0;
4011 }
4012 
4013 static int
4014 i915_cache_sharing_set(void *data, u64 val)
4015 {
4016 	struct drm_i915_private *dev_priv = data;
4017 	intel_wakeref_t wakeref;
4018 
4019 	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
4020 		return -ENODEV;
4021 
4022 	if (val > 3)
4023 		return -EINVAL;
4024 
4025 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4026 	with_intel_runtime_pm(dev_priv, wakeref) {
4027 		u32 snpcr;
4028 
4029 		/* Update the cache sharing policy here as well */
4030 		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4031 		snpcr &= ~GEN6_MBC_SNPCR_MASK;
4032 		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
4033 		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4034 	}
4035 
4036 	return 0;
4037 }
4038 
4039 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4040 			i915_cache_sharing_get, i915_cache_sharing_set,
4041 			"%llu\n");
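
/*
 * Editor's note -- example usage (gen6/gen7 only; valid values are 0-3
 * and are written into the GEN6_MBC_SNPCR field):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 *   cat /sys/kernel/debug/dri/0/i915_cache_sharing
 */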
4042 
4043 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4044 					  struct sseu_dev_info *sseu)
4045 {
4046 #define SS_MAX 2
4047 	const int ss_max = SS_MAX;
4048 	u32 sig1[SS_MAX], sig2[SS_MAX];
4049 	int ss;
4050 
4051 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4052 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4053 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4054 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4055 
4056 	for (ss = 0; ss < ss_max; ss++) {
4057 		unsigned int eu_cnt;
4058 
4059 		if (sig1[ss] & CHV_SS_PG_ENABLE)
4060 			/* skip disabled subslice */
4061 			continue;
4062 
4063 		sseu->slice_mask = BIT(0);
4064 		sseu->subslice_mask[0] |= BIT(ss);
4065 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4066 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4067 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4068 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4069 		sseu->eu_total += eu_cnt;
4070 		sseu->eu_per_subslice = max_t(unsigned int,
4071 					      sseu->eu_per_subslice, eu_cnt);
4072 	}
4073 #undef SS_MAX
4074 }
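
/*
 * Editor's note: each CHV_EU*_PG_ENABLE bit checked above power-gates a
 * pair of EUs, so a subslice with nothing gated contributes
 * 4 * 2 = 8 EUs to eu_total.
 */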
4075 
4076 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4077 				     struct sseu_dev_info *sseu)
4078 {
4079 #define SS_MAX 6
4080 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
4081 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4082 	int s, ss;
4083 
4084 	for (s = 0; s < info->sseu.max_slices; s++) {
4085 		/*
4086 		 * FIXME: Valid SS Mask respects the spec and reads
4087 		 * only valid bits for those registers, excluding reserved
4088 		 * bits, although this seems wrong because it would leave
4089 		 * many subslices without an ACK.
4090 		 */
4091 		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4092 			GEN10_PGCTL_VALID_SS_MASK(s);
4093 		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4094 		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4095 	}
4096 
4097 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4098 		     GEN9_PGCTL_SSA_EU19_ACK |
4099 		     GEN9_PGCTL_SSA_EU210_ACK |
4100 		     GEN9_PGCTL_SSA_EU311_ACK;
4101 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4102 		     GEN9_PGCTL_SSB_EU19_ACK |
4103 		     GEN9_PGCTL_SSB_EU210_ACK |
4104 		     GEN9_PGCTL_SSB_EU311_ACK;
4105 
4106 	for (s = 0; s < info->sseu.max_slices; s++) {
4107 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4108 			/* skip disabled slice */
4109 			continue;
4110 
4111 		sseu->slice_mask |= BIT(s);
4112 		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
4113 
4114 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4115 			unsigned int eu_cnt;
4116 
4117 			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4118 				/* skip disabled subslice */
4119 				continue;
4120 
4121 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4122 					       eu_mask[ss % 2]);
4123 			sseu->eu_total += eu_cnt;
4124 			sseu->eu_per_subslice = max_t(unsigned int,
4125 						      sseu->eu_per_subslice,
4126 						      eu_cnt);
4127 		}
4128 	}
4129 #undef SS_MAX
4130 }
4131 
4132 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4133 				    struct sseu_dev_info *sseu)
4134 {
4135 #define SS_MAX 3
4136 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
4137 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4138 	int s, ss;
4139 
4140 	for (s = 0; s < info->sseu.max_slices; s++) {
4141 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4142 		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4143 		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4144 	}
4145 
4146 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4147 		     GEN9_PGCTL_SSA_EU19_ACK |
4148 		     GEN9_PGCTL_SSA_EU210_ACK |
4149 		     GEN9_PGCTL_SSA_EU311_ACK;
4150 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4151 		     GEN9_PGCTL_SSB_EU19_ACK |
4152 		     GEN9_PGCTL_SSB_EU210_ACK |
4153 		     GEN9_PGCTL_SSB_EU311_ACK;
4154 
4155 	for (s = 0; s < info->sseu.max_slices; s++) {
4156 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4157 			/* skip disabled slice */
4158 			continue;
4159 
4160 		sseu->slice_mask |= BIT(s);
4161 
4162 		if (IS_GEN9_BC(dev_priv))
4163 			sseu->subslice_mask[s] =
4164 				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
4165 
4166 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4167 			unsigned int eu_cnt;
4168 
4169 			if (IS_GEN9_LP(dev_priv)) {
4170 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4171 					/* skip disabled subslice */
4172 					continue;
4173 
4174 				sseu->subslice_mask[s] |= BIT(ss);
4175 			}
4176 
4177 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4178 					       eu_mask[ss % 2]);
4179 			sseu->eu_total += eu_cnt;
4180 			sseu->eu_per_subslice = max_t(unsigned int,
4181 						      sseu->eu_per_subslice,
4182 						      eu_cnt);
4183 		}
4184 	}
4185 #undef SS_MAX
4186 }
4187 
4188 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4189 					 struct sseu_dev_info *sseu)
4190 {
4191 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4192 	int s;
4193 
4194 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4195 
4196 	if (sseu->slice_mask) {
4197 		sseu->eu_per_subslice =
4198 			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
4199 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4200 			sseu->subslice_mask[s] =
4201 				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
4202 		}
4203 		sseu->eu_total = sseu->eu_per_subslice *
4204 				 sseu_subslice_total(sseu);
4205 
4206 		/* subtract fused off EU(s) from enabled slice(s) */
4207 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4208 			u8 subslice_7eu =
4209 				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
4210 
4211 			sseu->eu_total -= hweight8(subslice_7eu);
4212 		}
4213 	}
4214 }
4215 
4216 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4217 				 const struct sseu_dev_info *sseu)
4218 {
4219 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4220 	const char *type = is_available_info ? "Available" : "Enabled";
4221 	int s;
4222 
4223 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
4224 		   sseu->slice_mask);
4225 	seq_printf(m, "  %s Slice Total: %u\n", type,
4226 		   hweight8(sseu->slice_mask));
4227 	seq_printf(m, "  %s Subslice Total: %u\n", type,
4228 		   sseu_subslice_total(sseu));
4229 	for (s = 0; s < fls(sseu->slice_mask); s++) {
4230 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4231 			   s, hweight8(sseu->subslice_mask[s]));
4232 	}
4233 	seq_printf(m, "  %s EU Total: %u\n", type,
4234 		   sseu->eu_total);
4235 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4236 		   sseu->eu_per_subslice);
4237 
4238 	if (!is_available_info)
4239 		return;
4240 
4241 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4242 	if (HAS_POOLED_EU(dev_priv))
4243 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4244 
4245 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4246 		   yesno(sseu->has_slice_pg));
4247 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4248 		   yesno(sseu->has_subslice_pg));
4249 	seq_printf(m, "  Has EU Power Gating: %s\n",
4250 		   yesno(sseu->has_eu_pg));
4251 }
4252 
4253 static int i915_sseu_status(struct seq_file *m, void *unused)
4254 {
4255 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4256 	struct sseu_dev_info sseu;
4257 	intel_wakeref_t wakeref;
4258 
4259 	if (INTEL_GEN(dev_priv) < 8)
4260 		return -ENODEV;
4261 
4262 	seq_puts(m, "SSEU Device Info\n");
4263 	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
4264 
4265 	seq_puts(m, "SSEU Device Status\n");
4266 	memset(&sseu, 0, sizeof(sseu));
4267 	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
4268 	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
4269 	sseu.max_eus_per_subslice =
4270 		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
4271 
4272 	with_intel_runtime_pm(dev_priv, wakeref) {
4273 		if (IS_CHERRYVIEW(dev_priv))
4274 			cherryview_sseu_device_status(dev_priv, &sseu);
4275 		else if (IS_BROADWELL(dev_priv))
4276 			broadwell_sseu_device_status(dev_priv, &sseu);
4277 		else if (IS_GEN(dev_priv, 9))
4278 			gen9_sseu_device_status(dev_priv, &sseu);
4279 		else if (INTEL_GEN(dev_priv) >= 10)
4280 			gen10_sseu_device_status(dev_priv, &sseu);
4281 	}
4282 
4283 	i915_print_sseu_info(m, false, &sseu);
4284 
4285 	return 0;
4286 }
4287 
4288 static int i915_forcewake_open(struct inode *inode, struct file *file)
4289 {
4290 	struct drm_i915_private *i915 = inode->i_private;
4291 
4292 	if (INTEL_GEN(i915) < 6)
4293 		return 0;
4294 
4295 	file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
4296 	intel_uncore_forcewake_user_get(i915);
4297 
4298 	return 0;
4299 }
4300 
4301 static int i915_forcewake_release(struct inode *inode, struct file *file)
4302 {
4303 	struct drm_i915_private *i915 = inode->i_private;
4304 
4305 	if (INTEL_GEN(i915) < 6)
4306 		return 0;
4307 
4308 	intel_uncore_forcewake_user_put(i915);
4309 	intel_runtime_pm_put(i915,
4310 			     (intel_wakeref_t)(uintptr_t)file->private_data);
4311 
4312 	return 0;
4313 }
4314 
4315 static const struct file_operations i915_forcewake_fops = {
4316 	.owner = THIS_MODULE,
4317 	.open = i915_forcewake_open,
4318 	.release = i915_forcewake_release,
4319 };
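
/*
 * Editor's note -- forcewake (plus a runtime-pm wakeref) is held for
 * exactly as long as this file is kept open, so a typical shell
 * session (a sketch, assuming debugfs at /sys/kernel/debug) is:
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user   # acquire
 *   ...poke registers while the GT is forced awake...
 *   exec 3<&-                                             # release
 */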
4320 
4321 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4322 {
4323 	struct drm_i915_private *dev_priv = m->private;
4324 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4325 
4326 	/* Synchronize with everything first in case there's been an HPD
4327 	 * storm that we haven't finished handling in the kernel yet.
4328 	 */
4329 	synchronize_irq(dev_priv->drm.irq);
4330 	flush_work(&dev_priv->hotplug.dig_port_work);
4331 	flush_work(&dev_priv->hotplug.hotplug_work);
4332 
4333 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4334 	seq_printf(m, "Detected: %s\n",
4335 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4336 
4337 	return 0;
4338 }
4339 
4340 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4341 					const char __user *ubuf, size_t len,
4342 					loff_t *offp)
4343 {
4344 	struct seq_file *m = file->private_data;
4345 	struct drm_i915_private *dev_priv = m->private;
4346 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4347 	unsigned int new_threshold;
4348 	int i;
4349 	char *newline;
4350 	char tmp[16];
4351 
4352 	if (len >= sizeof(tmp))
4353 		return -EINVAL;
4354 
4355 	if (copy_from_user(tmp, ubuf, len))
4356 		return -EFAULT;
4357 
4358 	tmp[len] = '\0';
4359 
4360 	/* Strip newline, if any */
4361 	newline = strchr(tmp, '\n');
4362 	if (newline)
4363 		*newline = '\0';
4364 
4365 	if (strcmp(tmp, "reset") == 0)
4366 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4367 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4368 		return -EINVAL;
4369 
4370 	if (new_threshold > 0)
4371 		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4372 			      new_threshold);
4373 	else
4374 		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4375 
4376 	spin_lock_irq(&dev_priv->irq_lock);
4377 	hotplug->hpd_storm_threshold = new_threshold;
4378 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4379 	for_each_hpd_pin(i)
4380 		hotplug->stats[i].count = 0;
4381 	spin_unlock_irq(&dev_priv->irq_lock);
4382 
4383 	/* Re-enable hpd immediately if we were in an irq storm */
4384 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4385 
4386 	return len;
4387 }
4388 
4389 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4390 {
4391 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4392 }
4393 
4394 static const struct file_operations i915_hpd_storm_ctl_fops = {
4395 	.owner = THIS_MODULE,
4396 	.open = i915_hpd_storm_ctl_open,
4397 	.read = seq_read,
4398 	.llseek = seq_lseek,
4399 	.release = single_release,
4400 	.write = i915_hpd_storm_ctl_write
4401 };
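
/*
 * Editor's note -- example usage; the write side accepts a decimal
 * threshold (0 disables detection) or the literal string "reset":
 *
 *   echo 8 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   cat /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */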
4402 
4403 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4404 {
4405 	struct drm_i915_private *dev_priv = m->private;
4406 
4407 	seq_printf(m, "Enabled: %s\n",
4408 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4409 
4410 	return 0;
4411 }
4412 
4413 static int
4414 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4415 {
4416 	return single_open(file, i915_hpd_short_storm_ctl_show,
4417 			   inode->i_private);
4418 }
4419 
4420 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4421 					      const char __user *ubuf,
4422 					      size_t len, loff_t *offp)
4423 {
4424 	struct seq_file *m = file->private_data;
4425 	struct drm_i915_private *dev_priv = m->private;
4426 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4427 	char *newline;
4428 	char tmp[16];
4429 	int i;
4430 	bool new_state;
4431 
4432 	if (len >= sizeof(tmp))
4433 		return -EINVAL;
4434 
4435 	if (copy_from_user(tmp, ubuf, len))
4436 		return -EFAULT;
4437 
4438 	tmp[len] = '\0';
4439 
4440 	/* Strip newline, if any */
4441 	newline = strchr(tmp, '\n');
4442 	if (newline)
4443 		*newline = '\0';
4444 
4445 	/* Reset to the "default" state for this system */
4446 	if (strcmp(tmp, "reset") == 0)
4447 		new_state = !HAS_DP_MST(dev_priv);
4448 	else if (kstrtobool(tmp, &new_state) != 0)
4449 		return -EINVAL;
4450 
4451 	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4452 		      new_state ? "En" : "Dis");
4453 
4454 	spin_lock_irq(&dev_priv->irq_lock);
4455 	hotplug->hpd_short_storm_enabled = new_state;
4456 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4457 	for_each_hpd_pin(i)
4458 		hotplug->stats[i].count = 0;
4459 	spin_unlock_irq(&dev_priv->irq_lock);
4460 
4461 	/* Re-enable hpd immediately if we were in an irq storm */
4462 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4463 
4464 	return len;
4465 }
4466 
4467 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4468 	.owner = THIS_MODULE,
4469 	.open = i915_hpd_short_storm_ctl_open,
4470 	.read = seq_read,
4471 	.llseek = seq_lseek,
4472 	.release = single_release,
4473 	.write = i915_hpd_short_storm_ctl_write,
4474 };
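
/*
 * Editor's note -- example usage; input is a kstrtobool() value, or
 * "reset" to restore the platform default (enabled only when the
 * device lacks DP MST support):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 */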
4475 
4476 static int i915_drrs_ctl_set(void *data, u64 val)
4477 {
4478 	struct drm_i915_private *dev_priv = data;
4479 	struct drm_device *dev = &dev_priv->drm;
4480 	struct intel_crtc *crtc;
4481 
4482 	if (INTEL_GEN(dev_priv) < 7)
4483 		return -ENODEV;
4484 
4485 	for_each_intel_crtc(dev, crtc) {
4486 		struct drm_connector_list_iter conn_iter;
4487 		struct intel_crtc_state *crtc_state;
4488 		struct drm_connector *connector;
4489 		struct drm_crtc_commit *commit;
4490 		int ret;
4491 
4492 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4493 		if (ret)
4494 			return ret;
4495 
4496 		crtc_state = to_intel_crtc_state(crtc->base.state);
4497 
4498 		if (!crtc_state->base.active ||
4499 		    !crtc_state->has_drrs)
4500 			goto out;
4501 
4502 		commit = crtc_state->base.commit;
4503 		if (commit) {
4504 			ret = wait_for_completion_interruptible(&commit->hw_done);
4505 			if (ret)
4506 				goto out;
4507 		}
4508 
4509 		drm_connector_list_iter_begin(dev, &conn_iter);
4510 		drm_for_each_connector_iter(connector, &conn_iter) {
4511 			struct intel_encoder *encoder;
4512 			struct intel_dp *intel_dp;
4513 
4514 			if (!(crtc_state->base.connector_mask &
4515 			      drm_connector_mask(connector)))
4516 				continue;
4517 
4518 			encoder = intel_attached_encoder(connector);
4519 			if (encoder->type != INTEL_OUTPUT_EDP)
4520 				continue;
4521 
4522 			DRM_DEBUG_DRIVER("Manually %sabling DRRS (val %llu)\n",
4523 					 val ? "en" : "dis", val);
4524 
4525 			intel_dp = enc_to_intel_dp(&encoder->base);
4526 			if (val)
4527 				intel_edp_drrs_enable(intel_dp,
4528 						      crtc_state);
4529 			else
4530 				intel_edp_drrs_disable(intel_dp,
4531 						       crtc_state);
4532 		}
4533 		drm_connector_list_iter_end(&conn_iter);
4534 
4535 out:
4536 		drm_modeset_unlock(&crtc->base.mutex);
4537 		if (ret)
4538 			return ret;
4539 	}
4540 
4541 	return 0;
4542 }
4543 
4544 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
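
/*
 * Editor's note -- example usage; this file is write-only, a nonzero
 * value force-enables eDP DRRS on every active CRTC that supports it
 * and zero disables it:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */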
4545 
4546 static ssize_t
4547 i915_fifo_underrun_reset_write(struct file *filp,
4548 			       const char __user *ubuf,
4549 			       size_t cnt, loff_t *ppos)
4550 {
4551 	struct drm_i915_private *dev_priv = filp->private_data;
4552 	struct intel_crtc *intel_crtc;
4553 	struct drm_device *dev = &dev_priv->drm;
4554 	int ret;
4555 	bool reset;
4556 
4557 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
4558 	if (ret)
4559 		return ret;
4560 
4561 	if (!reset)
4562 		return cnt;
4563 
4564 	for_each_intel_crtc(dev, intel_crtc) {
4565 		struct drm_crtc_commit *commit;
4566 		struct intel_crtc_state *crtc_state;
4567 
4568 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4569 		if (ret)
4570 			return ret;
4571 
4572 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4573 		commit = crtc_state->base.commit;
4574 		if (commit) {
4575 			ret = wait_for_completion_interruptible(&commit->hw_done);
4576 			if (!ret)
4577 				ret = wait_for_completion_interruptible(&commit->flip_done);
4578 		}
4579 
4580 		if (!ret && crtc_state->base.active) {
4581 			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4582 				      pipe_name(intel_crtc->pipe));
4583 
4584 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4585 		}
4586 
4587 		drm_modeset_unlock(&intel_crtc->base.mutex);
4588 
4589 		if (ret)
4590 			return ret;
4591 	}
4592 
4593 	ret = intel_fbc_reset_underrun(dev_priv);
4594 	if (ret)
4595 		return ret;
4596 
4597 	return cnt;
4598 }
4599 
4600 static const struct file_operations i915_fifo_underrun_reset_ops = {
4601 	.owner = THIS_MODULE,
4602 	.open = simple_open,
4603 	.write = i915_fifo_underrun_reset_write,
4604 	.llseek = default_llseek,
4605 };
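
/*
 * Editor's note -- example usage; any kstrtobool() true value re-arms
 * FIFO underrun reporting on all pipes and resets the FBC underrun
 * state, while a false value is accepted but does nothing:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */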
4606 
4607 static const struct drm_info_list i915_debugfs_list[] = {
4608 	{"i915_capabilities", i915_capabilities, 0},
4609 	{"i915_gem_objects", i915_gem_object_info, 0},
4610 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4611 	{"i915_gem_stolen", i915_gem_stolen_list_info, 0},
4612 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4613 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4614 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4615 	{"i915_guc_info", i915_guc_info, 0},
4616 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4617 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4618 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4619 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4620 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4621 	{"i915_frequency_info", i915_frequency_info, 0},
4622 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4623 	{"i915_reset_info", i915_reset_info, 0},
4624 	{"i915_drpc_info", i915_drpc_info, 0},
4625 	{"i915_emon_status", i915_emon_status, 0},
4626 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4627 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4628 	{"i915_fbc_status", i915_fbc_status, 0},
4629 	{"i915_ips_status", i915_ips_status, 0},
4630 	{"i915_sr_status", i915_sr_status, 0},
4631 	{"i915_opregion", i915_opregion, 0},
4632 	{"i915_vbt", i915_vbt, 0},
4633 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4634 	{"i915_context_status", i915_context_status, 0},
4635 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4636 	{"i915_swizzle_info", i915_swizzle_info, 0},
4637 	{"i915_llc", i915_llc, 0},
4638 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4639 	{"i915_energy_uJ", i915_energy_uJ, 0},
4640 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4641 	{"i915_power_domain_info", i915_power_domain_info, 0},
4642 	{"i915_dmc_info", i915_dmc_info, 0},
4643 	{"i915_display_info", i915_display_info, 0},
4644 	{"i915_engine_info", i915_engine_info, 0},
4645 	{"i915_rcs_topology", i915_rcs_topology, 0},
4646 	{"i915_shrinker_info", i915_shrinker_info, 0},
4647 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4648 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4649 	{"i915_wa_registers", i915_wa_registers, 0},
4650 	{"i915_ddb_info", i915_ddb_info, 0},
4651 	{"i915_sseu_status", i915_sseu_status, 0},
4652 	{"i915_drrs_status", i915_drrs_status, 0},
4653 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4654 };
4655 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4656 
4657 static const struct i915_debugfs_files {
4658 	const char *name;
4659 	const struct file_operations *fops;
4660 } i915_debugfs_files[] = {
4661 	{"i915_wedged", &i915_wedged_fops},
4662 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4663 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4664 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4665 	{"i915_error_state", &i915_error_state_fops},
4666 	{"i915_gpu_info", &i915_gpu_info_fops},
4667 #endif
4668 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4669 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4670 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4671 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4672 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4673 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4674 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4675 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4676 	{"i915_guc_log_level", &i915_guc_log_level_fops},
4677 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
4678 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4679 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4680 	{"i915_ipc_status", &i915_ipc_status_fops},
4681 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
4682 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4683 };
4684 
4685 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4686 {
4687 	struct drm_minor *minor = dev_priv->drm.primary;
4688 	struct dentry *ent;
4689 	int i;
4690 
4691 	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4692 				  minor->debugfs_root, to_i915(minor->dev),
4693 				  &i915_forcewake_fops);
4694 	if (!ent)
4695 		return -ENOMEM;
4696 
4697 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4698 		ent = debugfs_create_file(i915_debugfs_files[i].name,
4699 					  S_IRUGO | S_IWUSR,
4700 					  minor->debugfs_root,
4701 					  to_i915(minor->dev),
4702 					  i915_debugfs_files[i].fops);
4703 		if (!ent)
4704 			return -ENOMEM;
4705 	}
4706 
4707 	return drm_debugfs_create_files(i915_debugfs_list,
4708 					I915_DEBUGFS_ENTRIES,
4709 					minor->debugfs_root, minor);
4710 }
4711 
4712 struct dpcd_block {
4713 	/* DPCD dump start address. */
4714 	unsigned int offset;
4715 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4716 	unsigned int end;
4717 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4718 	size_t size;
4719 	/* Only valid for eDP. */
4720 	bool edp;
4721 };
4722 
4723 static const struct dpcd_block i915_dpcd_debug[] = {
4724 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4725 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4726 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4727 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4728 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4729 	{ .offset = DP_SET_POWER },
4730 	{ .offset = DP_EDP_DPCD_REV },
4731 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4732 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4733 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4734 };
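
/*
 * Editor's note -- worked example of the size rule applied below: a
 * block with .end set dumps end - offset + 1 bytes inclusive, one with
 * only .size set dumps that many bytes, and one with neither dumps a
 * single byte. So the first entry above reads DP_RECEIVER_CAP_SIZE
 * (0xf) bytes starting at DP_DPCD_REV, while { .offset = DP_SET_POWER }
 * reads exactly one byte.
 */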
4735 
4736 static int i915_dpcd_show(struct seq_file *m, void *data)
4737 {
4738 	struct drm_connector *connector = m->private;
4739 	struct intel_dp *intel_dp =
4740 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4741 	u8 buf[16];
4742 	ssize_t err;
4743 	int i;
4744 
4745 	if (connector->status != connector_status_connected)
4746 		return -ENODEV;
4747 
4748 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4749 		const struct dpcd_block *b = &i915_dpcd_debug[i];
4750 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4751 
4752 		if (b->edp &&
4753 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4754 			continue;
4755 
4756 		/* low tech for now */
4757 		if (WARN_ON(size > sizeof(buf)))
4758 			continue;
4759 
4760 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4761 		if (err < 0)
4762 			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4763 		else
4764 			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4765 	}
4766 
4767 	return 0;
4768 }
4769 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4770 
4771 static int i915_panel_show(struct seq_file *m, void *data)
4772 {
4773 	struct drm_connector *connector = m->private;
4774 	struct intel_dp *intel_dp =
4775 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4776 
4777 	if (connector->status != connector_status_connected)
4778 		return -ENODEV;
4779 
4780 	seq_printf(m, "Panel power up delay: %d\n",
4781 		   intel_dp->panel_power_up_delay);
4782 	seq_printf(m, "Panel power down delay: %d\n",
4783 		   intel_dp->panel_power_down_delay);
4784 	seq_printf(m, "Backlight on delay: %d\n",
4785 		   intel_dp->backlight_on_delay);
4786 	seq_printf(m, "Backlight off delay: %d\n",
4787 		   intel_dp->backlight_off_delay);
4788 
4789 	return 0;
4790 }
4791 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4792 
4793 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4794 {
4795 	struct drm_connector *connector = m->private;
4796 	struct intel_connector *intel_connector = to_intel_connector(connector);
4797 
4798 	if (connector->status != connector_status_connected)
4799 		return -ENODEV;
4800 
4801 	/* HDCP must be supported by the connector */
4802 	if (!intel_connector->hdcp.shim)
4803 		return -EINVAL;
4804 
4805 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
4806 		   connector->base.id);
4807 	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4808 		   "None" : "HDCP1.4");
4809 	seq_puts(m, "\n");
4810 
4811 	return 0;
4812 }
4813 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4814 
4815 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
4816 {
4817 	struct drm_connector *connector = m->private;
4818 	struct drm_device *dev = connector->dev;
4819 	struct drm_crtc *crtc;
4820 	struct intel_dp *intel_dp;
4821 	struct drm_modeset_acquire_ctx ctx;
4822 	struct intel_crtc_state *crtc_state = NULL;
4823 	int ret = 0;
4824 	bool try_again = false;
4825 
4826 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
4827 
4828 	do {
4829 		try_again = false;
4830 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4831 				       &ctx);
4832 		if (ret) {
4833 			ret = -EINTR;
4834 			break;
4835 		}
4836 		crtc = connector->state->crtc;
4837 		if (connector->status != connector_status_connected || !crtc) {
4838 			ret = -ENODEV;
4839 			break;
4840 		}
4841 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
4842 		if (ret == -EDEADLK) {
4843 			ret = drm_modeset_backoff(&ctx);
4844 			if (!ret) {
4845 				try_again = true;
4846 				continue;
4847 			}
4848 			break;
4849 		} else if (ret) {
4850 			break;
4851 		}
4852 		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4853 		crtc_state = to_intel_crtc_state(crtc->state);
4854 		seq_printf(m, "DSC_Enabled: %s\n",
4855 			   yesno(crtc_state->dsc_params.compression_enable));
4856 		seq_printf(m, "DSC_Sink_Support: %s\n",
4857 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
4858 		if (!intel_dp_is_edp(intel_dp))
4859 			seq_printf(m, "FEC_Sink_Support: %s\n",
4860 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
4861 	} while (try_again);
4862 
4863 	drm_modeset_drop_locks(&ctx);
4864 	drm_modeset_acquire_fini(&ctx);
4865 
4866 	return ret;
4867 }
4868 
4869 static ssize_t i915_dsc_fec_support_write(struct file *file,
4870 					  const char __user *ubuf,
4871 					  size_t len, loff_t *offp)
4872 {
4873 	bool dsc_enable = false;
4874 	int ret;
4875 	struct drm_connector *connector =
4876 		((struct seq_file *)file->private_data)->private;
4877 	struct intel_encoder *encoder = intel_attached_encoder(connector);
4878 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4879 
4880 	if (len == 0)
4881 		return 0;
4882 
4883 	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4884 			 len);
4885 
4886 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4887 	if (ret < 0)
4888 		return ret;
4889 
4890 	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4891 			 (dsc_enable) ? "true" : "false");
4892 	intel_dp->force_dsc_en = dsc_enable;
4893 
4894 	*offp += len;
4895 	return len;
4896 }
4897 
4898 static int i915_dsc_fec_support_open(struct inode *inode,
4899 				     struct file *file)
4900 {
4901 	return single_open(file, i915_dsc_fec_support_show,
4902 			   inode->i_private);
4903 }
4904 
4905 static const struct file_operations i915_dsc_fec_support_fops = {
4906 	.owner = THIS_MODULE,
4907 	.open = i915_dsc_fec_support_open,
4908 	.read = seq_read,
4909 	.llseek = seq_lseek,
4910 	.release = single_release,
4911 	.write = i915_dsc_fec_support_write
4912 };
4913 
4914 /**
4915  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4916  * @connector: pointer to a registered drm_connector
4917  *
4918  * Cleanup will be done by drm_connector_unregister() through a call to
4919  * drm_debugfs_connector_remove().
4920  *
4921  * Returns 0 on success, negative error codes on error.
4922  */
4923 int i915_debugfs_connector_add(struct drm_connector *connector)
4924 {
4925 	struct dentry *root = connector->debugfs_entry;
4926 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
4927 
4928 	/* The connector must have been registered beforehand. */
4929 	if (!root)
4930 		return -ENODEV;
4931 
4932 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4933 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4934 		debugfs_create_file("i915_dpcd", S_IRUGO, root,
4935 				    connector, &i915_dpcd_fops);
4936 
4937 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4938 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4939 				    connector, &i915_panel_fops);
4940 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4941 				    connector, &i915_psr_sink_status_fops);
4942 	}
4943 
4944 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4945 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4946 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4947 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4948 				    connector, &i915_hdcp_sink_capability_fops);
4949 	}
4950 
4951 	if (INTEL_GEN(dev_priv) >= 10 &&
4952 	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4953 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4954 		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4955 				    connector, &i915_dsc_fec_support_fops);
4956 
4957 	return 0;
4958 }
4959