/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "gt/intel_reset.h"

#include "i915_debugfs.h"
#include "i915_gem_context.h"
#include "i915_irq.h"
#include "intel_csr.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_guc_submission.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sideband.h"

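/*
 * Each debugfs node carries a drm_info_node in its private data; resolve
 * it back to our drm_i915_private so the show() callbacks below can start
 * from the device.
 */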
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

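/*
 * Single-character status flags used by describe_obj() below:
 * '*' active, 'p' globally pinned (e.g. for scanout), 'X'/'Y' tiling,
 * 'g' outstanding GGTT mmap userfaults, 'M' kernel mapping present.
 */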
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

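/* Sum the GGTT footprint of an object across all of its GGTT VMAs. */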
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	for_each_ggtt_vma(vma, obj) {
		if (drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

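/*
 * Decode a GTT page-size mask for display: single sizes need no scratch
 * space, a mix of sizes is spelled out into @buf.
 */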
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");

		/* Strip the trailing ", "; guard against unknown bits only. */
		if (x >= 2)
			buf[x - 2] = '\0';
		else
			buf[0] = '\0';

		return buf;
	}
}

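/*
 * Print a one-line summary of a GEM object: status flags, size, domains,
 * cache level and every VMA currently bound. Caller holds struct_mutex.
 */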
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_puts(m, " (global)");
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, ", fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

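/*
 * List every object backed by stolen memory, sorted by offset within the
 * stolen region.
 */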
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}

struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
	u64 closed;
};

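/*
 * idr callback (also invoked directly for kernel-owned objects): fold one
 * object into the accumulated statistics, counting only VMAs in the
 * client's VM plus any GGTT bindings.
 */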
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)

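/* Fold the kernel's batch-pool objects across all engines into one line. */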
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

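/*
 * Kernel-owned context state and ringbuffers are accumulated into kstats;
 * contexts with an open file are reported per client, named after the
 * owning task.
 */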
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long nobject, n;
	int count, ret;

	nobject = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret) {
		kvfree(objects);
		return ret;
	}

	count = 0;
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		objects[count++] = obj;
		if (count == nobject)
			break;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	total_obj_size = total_gtt_size = 0;
	for (n = 0; n < count; n++) {
		obj = objects[n];

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	kvfree(objects);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

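/*
 * Dump the gen8+ display interrupt registers, grabbing each pipe's power
 * domain first so we never read a powered-down pipe.
 */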
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;
			intel_wakeref_t pref;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:\t\t%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:\t%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:\t\t%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:\t\t%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:\t%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:\t\t%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:\t\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:\t\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:\t\t%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):\t%08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

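/*
 * GPU error-state interfaces: i915_gpu_info captures a fresh snapshot on
 * open, while i915_error_state exposes the last recorded hang (writing
 * anything to it clears the record).
 */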
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(i915, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_state *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
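
/*
 * Both sets of fops are registered with the drm_i915_private as the
 * inode's private data (from i915_debugfs_register(), outside this
 * excerpt), which is why the open() hooks above read inode->i_private.
 */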
#endif

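/*
 * Dump the current RPS/turbo state: requested, actual and limit
 * frequencies, plus the up/down evaluation-interval counters.
 */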
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}

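/* Decode INSTDONE, per slice/subslice where the hardware splits it. */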
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

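/* Hangcheck status plus the last-sampled ACTHD/INSTDONE per engine. */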
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id)
			acthd[id] = intel_engine_get_active_head(engine);

		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s: %d ms ago\n",
			   engine->name,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		if (engine->id == RCS0) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}

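/* DRPC (render C-state) reporting, one helper per hardware family. */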
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RS3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake.count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

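/* Print a raw RC6 residency register along with its value in microseconds. */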
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}

static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

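/* Wire the get/set pair above into debugfs file operations. */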
1676 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1677 			i915_fbc_false_color_get, i915_fbc_false_color_set,
1678 			"%llu\n");
1679 
1680 static int i915_ips_status(struct seq_file *m, void *unused)
1681 {
1682 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1683 	intel_wakeref_t wakeref;
1684 
1685 	if (!HAS_IPS(dev_priv))
1686 		return -ENODEV;
1687 
1688 	wakeref = intel_runtime_pm_get(dev_priv);
1689 
1690 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1691 		   yesno(i915_modparams.enable_ips));
1692 
1693 	if (INTEL_GEN(dev_priv) >= 8) {
1694 		seq_puts(m, "Currently: unknown\n");
1695 	} else {
1696 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1697 			seq_puts(m, "Currently: enabled\n");
1698 		else
1699 			seq_puts(m, "Currently: disabled\n");
1700 	}
1701 
1702 	intel_runtime_pm_put(dev_priv, wakeref);
1703 
1704 	return 0;
1705 }
1706 
1707 static int i915_sr_status(struct seq_file *m, void *unused)
1708 {
1709 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1710 	intel_wakeref_t wakeref;
1711 	bool sr_enabled = false;
1712 
1713 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1714 
	if (INTEL_GEN(dev_priv) >= 9) {
		/* no global SR status; inspect per-plane WM */
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	} else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		   IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	} else if (IS_I915GM(dev_priv)) {
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	} else if (IS_PINEVIEW(dev_priv)) {
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	}
1728 
1729 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
1730 
1731 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1732 
1733 	return 0;
1734 }
1735 
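/*
 * Ironlake energy monitor: report the GMCH temperature and the chipset/GFX
 * power estimates sampled by the EMON hardware.
 */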
1736 static int i915_emon_status(struct seq_file *m, void *unused)
1737 {
1738 	struct drm_i915_private *i915 = node_to_i915(m->private);
1739 	intel_wakeref_t wakeref;
1740 
1741 	if (!IS_GEN(i915, 5))
1742 		return -ENODEV;
1743 
1744 	with_intel_runtime_pm(i915, wakeref) {
1745 		unsigned long temp, chipset, gfx;
1746 
1747 		temp = i915_mch_val(i915);
1748 		chipset = i915_chipset_val(i915);
1749 		gfx = i915_gfx_val(i915);
1750 
1751 		seq_printf(m, "GMCH temp: %ld\n", temp);
1752 		seq_printf(m, "Chipset power: %ld\n", chipset);
1753 		seq_printf(m, "GFX power: %ld\n", gfx);
1754 		seq_printf(m, "Total power: %ld\n", chipset + gfx);
1755 	}
1756 
1757 	return 0;
1758 }
1759 
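/*
 * For LLC platforms, dump the pcode table mapping each GPU frequency to the
 * effective CPU and ring frequencies; the pcode reply packs both values as
 * multiples of 100 MHz.
 */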
1760 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1761 {
1762 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1763 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1764 	unsigned int max_gpu_freq, min_gpu_freq;
1765 	intel_wakeref_t wakeref;
1766 	int gpu_freq, ia_freq;
1767 
1768 	if (!HAS_LLC(dev_priv))
1769 		return -ENODEV;
1770 
1771 	min_gpu_freq = rps->min_freq;
1772 	max_gpu_freq = rps->max_freq;
1773 	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 MHz units */
1775 		min_gpu_freq /= GEN9_FREQ_SCALER;
1776 		max_gpu_freq /= GEN9_FREQ_SCALER;
1777 	}
1778 
1779 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1780 
1781 	wakeref = intel_runtime_pm_get(dev_priv);
1782 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1783 		ia_freq = gpu_freq;
1784 		sandybridge_pcode_read(dev_priv,
1785 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1786 				       &ia_freq);
1787 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1788 			   intel_gpu_freq(dev_priv, (gpu_freq *
1789 						     (IS_GEN9_BC(dev_priv) ||
1790 						      INTEL_GEN(dev_priv) >= 10 ?
1791 						      GEN9_FREQ_SCALER : 1))),
1792 			   ((ia_freq >> 0) & 0xff) * 100,
1793 			   ((ia_freq >> 8) & 0xff) * 100);
1794 	}
1795 	intel_runtime_pm_put(dev_priv, wakeref);
1796 
1797 	return 0;
1798 }
1799 
1800 static int i915_opregion(struct seq_file *m, void *unused)
1801 {
1802 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1803 	struct drm_device *dev = &dev_priv->drm;
1804 	struct intel_opregion *opregion = &dev_priv->opregion;
1805 	int ret;
1806 
1807 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1808 	if (ret)
1809 		goto out;
1810 
1811 	if (opregion->header)
1812 		seq_write(m, opregion->header, OPREGION_SIZE);
1813 
1814 	mutex_unlock(&dev->struct_mutex);
1815 
1816 out:
1817 	return 0;
1818 }
1819 
1820 static int i915_vbt(struct seq_file *m, void *unused)
1821 {
1822 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1823 
1824 	if (opregion->vbt)
1825 		seq_write(m, opregion->vbt, opregion->vbt_size);
1826 
1827 	return 0;
1828 }
1829 
1830 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1831 {
1832 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1833 	struct drm_device *dev = &dev_priv->drm;
1834 	struct intel_framebuffer *fbdev_fb = NULL;
1835 	struct drm_framebuffer *drm_fb;
1836 	int ret;
1837 
1838 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1839 	if (ret)
1840 		return ret;
1841 
1842 #ifdef CONFIG_DRM_FBDEV_EMULATION
1843 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1844 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1845 
1846 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1847 			   fbdev_fb->base.width,
1848 			   fbdev_fb->base.height,
1849 			   fbdev_fb->base.format->depth,
1850 			   fbdev_fb->base.format->cpp[0] * 8,
1851 			   fbdev_fb->base.modifier,
1852 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1853 		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1854 		seq_putc(m, '\n');
1855 	}
1856 #endif
1857 
1858 	mutex_lock(&dev->mode_config.fb_lock);
1859 	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb == fbdev_fb)
1862 			continue;
1863 
1864 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1865 			   fb->base.width,
1866 			   fb->base.height,
1867 			   fb->base.format->depth,
1868 			   fb->base.format->cpp[0] * 8,
1869 			   fb->base.modifier,
1870 			   drm_framebuffer_read_refcount(&fb->base));
1871 		describe_obj(m, intel_fb_obj(&fb->base));
1872 		seq_putc(m, '\n');
1873 	}
1874 	mutex_unlock(&dev->mode_config.fb_lock);
1875 	mutex_unlock(&dev->struct_mutex);
1876 
1877 	return 0;
1878 }
1879 
1880 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1881 {
1882 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1883 		   ring->space, ring->head, ring->tail, ring->emit);
1884 }
1885 
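/* Dump every GEM context with its per-engine state object and ringbuffer. */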
1886 static int i915_context_status(struct seq_file *m, void *unused)
1887 {
1888 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1889 	struct drm_device *dev = &dev_priv->drm;
1890 	struct i915_gem_context *ctx;
1891 	int ret;
1892 
1893 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1894 	if (ret)
1895 		return ret;
1896 
1897 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1898 		struct i915_gem_engines_iter it;
1899 		struct intel_context *ce;
1900 
1901 		seq_puts(m, "HW context ");
1902 		if (!list_empty(&ctx->hw_id_link))
1903 			seq_printf(m, "%x [pin %u]", ctx->hw_id,
1904 				   atomic_read(&ctx->hw_id_pin_count));
1905 		if (ctx->pid) {
1906 			struct task_struct *task;
1907 
1908 			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1909 			if (task) {
1910 				seq_printf(m, "(%s [%d]) ",
1911 					   task->comm, task->pid);
1912 				put_task_struct(task);
1913 			}
1914 		} else if (IS_ERR(ctx->file_priv)) {
1915 			seq_puts(m, "(deleted) ");
1916 		} else {
1917 			seq_puts(m, "(kernel) ");
1918 		}
1919 
1920 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1921 		seq_putc(m, '\n');
1922 
1923 		for_each_gem_engine(ce,
1924 				    i915_gem_context_lock_engines(ctx), it) {
1925 			seq_printf(m, "%s: ", ce->engine->name);
1926 			if (ce->state)
1927 				describe_obj(m, ce->state->obj);
1928 			if (ce->ring)
1929 				describe_ctx_ring(m, ce->ring);
1930 			seq_putc(m, '\n');
1931 		}
1932 		i915_gem_context_unlock_engines(ctx);
1933 
1934 		seq_putc(m, '\n');
1935 	}
1936 
1937 	mutex_unlock(&dev->struct_mutex);
1938 
1939 	return 0;
1940 }
1941 
1942 static const char *swizzle_string(unsigned swizzle)
1943 {
1944 	switch (swizzle) {
1945 	case I915_BIT_6_SWIZZLE_NONE:
1946 		return "none";
1947 	case I915_BIT_6_SWIZZLE_9:
1948 		return "bit9";
1949 	case I915_BIT_6_SWIZZLE_9_10:
1950 		return "bit9/bit10";
1951 	case I915_BIT_6_SWIZZLE_9_11:
1952 		return "bit9/bit11";
1953 	case I915_BIT_6_SWIZZLE_9_10_11:
1954 		return "bit9/bit10/bit11";
1955 	case I915_BIT_6_SWIZZLE_9_17:
1956 		return "bit9/bit17";
1957 	case I915_BIT_6_SWIZZLE_9_10_17:
1958 		return "bit9/bit10/bit17";
1959 	case I915_BIT_6_SWIZZLE_UNKNOWN:
1960 		return "unknown";
1961 	}
1962 
1963 	return "bug";
1964 }
1965 
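/*
 * Report the bit-6 swizzling mode used for X and Y tiling, along with the
 * raw DRAM/arbiter configuration registers it was derived from.
 */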
1966 static int i915_swizzle_info(struct seq_file *m, void *data)
1967 {
1968 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1969 	intel_wakeref_t wakeref;
1970 
1971 	wakeref = intel_runtime_pm_get(dev_priv);
1972 
1973 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1974 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1975 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1976 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1977 
1978 	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DCC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DCC2 = 0x%08x\n",
			   I915_READ(DCC2));
1983 		seq_printf(m, "C0DRB3 = 0x%04x\n",
1984 			   I915_READ16(C0DRB3));
1985 		seq_printf(m, "C1DRB3 = 0x%04x\n",
1986 			   I915_READ16(C1DRB3));
1987 	} else if (INTEL_GEN(dev_priv) >= 6) {
1988 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1989 			   I915_READ(MAD_DIMM_C0));
1990 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1991 			   I915_READ(MAD_DIMM_C1));
1992 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1993 			   I915_READ(MAD_DIMM_C2));
1994 		seq_printf(m, "TILECTL = 0x%08x\n",
1995 			   I915_READ(TILECTL));
1996 		if (INTEL_GEN(dev_priv) >= 8)
1997 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1998 				   I915_READ(GAMTARBMODE));
1999 		else
2000 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2001 				   I915_READ(ARB_MODE));
2002 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2003 			   I915_READ(DISP_ARB_CTL));
2004 	}
2005 
2006 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2007 		seq_puts(m, "L-shaped memory detected\n");
2008 
2009 	intel_runtime_pm_put(dev_priv, wakeref);
2010 
2011 	return 0;
2012 }
2013 
2014 static const char *rps_power_to_str(unsigned int power)
2015 {
2016 	static const char * const strings[] = {
2017 		[LOW_POWER] = "low power",
2018 		[BETWEEN] = "mixed",
2019 		[HIGH_POWER] = "high power",
2020 	};
2021 
2022 	if (power >= ARRAY_SIZE(strings) || !strings[power])
2023 		return "unknown";
2024 
2025 	return strings[power];
2026 }
2027 
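/*
 * Summarise RPS state: requested vs actual frequency, the soft/hard limits,
 * outstanding boosts, and the up/down evaluation-interval counters that
 * drive frequency autotuning.
 */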
2028 static int i915_rps_boost_info(struct seq_file *m, void *data)
2029 {
2030 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2031 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
2032 	u32 act_freq = rps->cur_freq;
2033 	intel_wakeref_t wakeref;
2034 
2035 	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
2036 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2037 			vlv_punit_get(dev_priv);
2038 			act_freq = vlv_punit_read(dev_priv,
2039 						  PUNIT_REG_GPU_FREQ_STS);
2040 			vlv_punit_put(dev_priv);
2041 			act_freq = (act_freq >> 8) & 0xff;
2042 		} else {
2043 			act_freq = intel_get_cagf(dev_priv,
2044 						  I915_READ(GEN6_RPSTAT1));
2045 		}
2046 	}
2047 
2048 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2049 	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
2050 	seq_printf(m, "Boosts outstanding? %d\n",
2051 		   atomic_read(&rps->num_waiters));
2052 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
2053 	seq_printf(m, "Frequency requested %d, actual %d\n",
2054 		   intel_gpu_freq(dev_priv, rps->cur_freq),
2055 		   intel_gpu_freq(dev_priv, act_freq));
2056 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2057 		   intel_gpu_freq(dev_priv, rps->min_freq),
2058 		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2059 		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2060 		   intel_gpu_freq(dev_priv, rps->max_freq));
2061 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2062 		   intel_gpu_freq(dev_priv, rps->idle_freq),
2063 		   intel_gpu_freq(dev_priv, rps->efficient_freq),
2064 		   intel_gpu_freq(dev_priv, rps->boost_freq));
2065 
2066 	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
2067 
2068 	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
2069 		u32 rpup, rpupei;
2070 		u32 rpdown, rpdownei;
2071 
2072 		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
2073 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2074 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2075 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2076 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2077 		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
2078 
2079 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2080 			   rps_power_to_str(rps->power.mode));
2081 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2082 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
2083 			   rps->power.up_threshold);
2084 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2085 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2086 			   rps->power.down_threshold);
2087 	} else {
2088 		seq_puts(m, "\nRPS Autotuning inactive\n");
2089 	}
2090 
2091 	return 0;
2092 }
2093 
2094 static int i915_llc(struct seq_file *m, void *data)
2095 {
2096 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2097 	const bool edram = INTEL_GEN(dev_priv) > 8;
2098 
2099 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2100 	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
2101 		   dev_priv->edram_size_mb);
2102 
2103 	return 0;
2104 }
2105 
2106 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2107 {
2108 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2109 	intel_wakeref_t wakeref;
2110 	struct drm_printer p;
2111 
2112 	if (!HAS_HUC(dev_priv))
2113 		return -ENODEV;
2114 
2115 	p = drm_seq_file_printer(m);
2116 	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2117 
2118 	with_intel_runtime_pm(dev_priv, wakeref)
2119 		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2120 
2121 	return 0;
2122 }
2123 
2124 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2125 {
2126 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2127 	intel_wakeref_t wakeref;
2128 	struct drm_printer p;
2129 
2130 	if (!HAS_GUC(dev_priv))
2131 		return -ENODEV;
2132 
2133 	p = drm_seq_file_printer(m);
2134 	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2135 
2136 	with_intel_runtime_pm(dev_priv, wakeref) {
2137 		u32 tmp = I915_READ(GUC_STATUS);
2138 		u32 i;
2139 
2140 		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2141 		seq_printf(m, "\tBootrom status = 0x%x\n",
2142 			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2143 		seq_printf(m, "\tuKernel status = 0x%x\n",
2144 			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2145 		seq_printf(m, "\tMIA Core status = 0x%x\n",
2146 			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2147 		seq_puts(m, "\nScratch registers:\n");
2148 		for (i = 0; i < 16; i++) {
2149 			seq_printf(m, "\t%2d: \t0x%x\n",
2150 				   i, I915_READ(SOFT_SCRATCH(i)));
2151 		}
2152 	}
2153 
2154 	return 0;
2155 }
2156 
2157 static const char *
2158 stringify_guc_log_type(enum guc_log_buffer_type type)
2159 {
2160 	switch (type) {
2161 	case GUC_ISR_LOG_BUFFER:
2162 		return "ISR";
2163 	case GUC_DPC_LOG_BUFFER:
2164 		return "DPC";
2165 	case GUC_CRASH_DUMP_LOG_BUFFER:
2166 		return "CRASH";
2167 	default:
2168 		MISSING_CASE(type);
2169 	}
2170 
2171 	return "";
2172 }
2173 
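/*
 * Print GuC log relay statistics: the relay-full count and the per-buffer-type
 * flush/overflow counters.
 */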
2174 static void i915_guc_log_info(struct seq_file *m,
2175 			      struct drm_i915_private *dev_priv)
2176 {
2177 	struct intel_guc_log *log = &dev_priv->guc.log;
2178 	enum guc_log_buffer_type type;
2179 
2180 	if (!intel_guc_log_relay_enabled(log)) {
2181 		seq_puts(m, "GuC log relay disabled\n");
2182 		return;
2183 	}
2184 
2185 	seq_puts(m, "GuC logging stats:\n");
2186 
2187 	seq_printf(m, "\tRelay full count: %u\n",
2188 		   log->relay.full_count);
2189 
2190 	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2191 		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2192 			   stringify_guc_log_type(type),
2193 			   log->stats[type].flush,
2194 			   log->stats[type].sampled_overflow);
2195 	}
2196 }
2197 
2198 static void i915_guc_client_info(struct seq_file *m,
2199 				 struct drm_i915_private *dev_priv,
2200 				 struct intel_guc_client *client)
2201 {
2202 	struct intel_engine_cs *engine;
2203 	enum intel_engine_id id;
2204 	u64 tot = 0;
2205 
	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		   client->priority, client->stage_id,
		   client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		   client->doorbell_id, client->doorbell_offset);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];

		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
			   submissions, engine->name);
	}
2217 	seq_printf(m, "\tTotal: %llu\n", tot);
2218 }
2219 
2220 static int i915_guc_info(struct seq_file *m, void *data)
2221 {
2222 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2223 	const struct intel_guc *guc = &dev_priv->guc;
2224 
2225 	if (!USES_GUC(dev_priv))
2226 		return -ENODEV;
2227 
2228 	i915_guc_log_info(m, dev_priv);
2229 
2230 	if (!USES_GUC_SUBMISSION(dev_priv))
2231 		return 0;
2232 
2233 	GEM_BUG_ON(!guc->execbuf_client);
2234 
	seq_puts(m, "\nDoorbell map:\n");
2236 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2237 	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2238 
2239 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2240 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2241 	if (guc->preempt_client) {
2242 		seq_printf(m, "\nGuC preempt client @ %p:\n",
2243 			   guc->preempt_client);
2244 		i915_guc_client_info(m, dev_priv, guc->preempt_client);
2245 	}
2246 
2247 	/* Add more as required ... */
2248 
2249 	return 0;
2250 }
2251 
2252 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2253 {
2254 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2255 	const struct intel_guc *guc = &dev_priv->guc;
2256 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2257 	struct intel_guc_client *client = guc->execbuf_client;
2258 	intel_engine_mask_t tmp;
2259 	int index;
2260 
2261 	if (!USES_GUC_SUBMISSION(dev_priv))
2262 		return -ENODEV;
2263 
2264 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2265 		struct intel_engine_cs *engine;
2266 
2267 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2268 			continue;
2269 
2270 		seq_printf(m, "GuC stage descriptor %u:\n", index);
2271 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2272 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2273 		seq_printf(m, "\tPriority: %d\n", desc->priority);
2274 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2275 		seq_printf(m, "\tEngines used: 0x%x\n",
2276 			   desc->engines_used);
2277 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2278 			   desc->db_trigger_phy,
2279 			   desc->db_trigger_cpu,
2280 			   desc->db_trigger_uk);
2281 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
2282 			   desc->process_desc);
2283 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2284 			   desc->wq_addr, desc->wq_size);
2285 		seq_putc(m, '\n');
2286 
2287 		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2288 			u32 guc_engine_id = engine->guc_id;
2289 			struct guc_execlist_context *lrc =
2290 						&desc->lrc[guc_engine_id];
2291 
2292 			seq_printf(m, "\t%s LRC:\n", engine->name);
2293 			seq_printf(m, "\t\tContext desc: 0x%x\n",
2294 				   lrc->context_desc);
2295 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2296 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2297 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2298 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2299 			seq_putc(m, '\n');
2300 		}
2301 	}
2302 
2303 	return 0;
2304 }
2305 
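/*
 * Dump a raw GuC log buffer as hex words; node->info_ent->data selects
 * between the regular log and the load-error capture.
 */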
2306 static int i915_guc_log_dump(struct seq_file *m, void *data)
2307 {
2308 	struct drm_info_node *node = m->private;
2309 	struct drm_i915_private *dev_priv = node_to_i915(node);
2310 	bool dump_load_err = !!node->info_ent->data;
2311 	struct drm_i915_gem_object *obj = NULL;
2312 	u32 *log;
2313 	int i = 0;
2314 
2315 	if (!HAS_GUC(dev_priv))
2316 		return -ENODEV;
2317 
2318 	if (dump_load_err)
2319 		obj = dev_priv->guc.load_err_log;
2320 	else if (dev_priv->guc.log.vma)
2321 		obj = dev_priv->guc.log.vma->obj;
2322 
2323 	if (!obj)
2324 		return 0;
2325 
2326 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2327 	if (IS_ERR(log)) {
2328 		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data inaccessible)\n");
2330 		return PTR_ERR(log);
2331 	}
2332 
2333 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2334 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2335 			   *(log + i), *(log + i + 1),
2336 			   *(log + i + 2), *(log + i + 3));
2337 
2338 	seq_putc(m, '\n');
2339 
2340 	i915_gem_object_unpin_map(obj);
2341 
2342 	return 0;
2343 }
2344 
2345 static int i915_guc_log_level_get(void *data, u64 *val)
2346 {
2347 	struct drm_i915_private *dev_priv = data;
2348 
2349 	if (!USES_GUC(dev_priv))
2350 		return -ENODEV;
2351 
2352 	*val = intel_guc_log_get_level(&dev_priv->guc.log);
2353 
2354 	return 0;
2355 }
2356 
2357 static int i915_guc_log_level_set(void *data, u64 val)
2358 {
2359 	struct drm_i915_private *dev_priv = data;
2360 
2361 	if (!USES_GUC(dev_priv))
2362 		return -ENODEV;
2363 
2364 	return intel_guc_log_set_level(&dev_priv->guc.log, val);
2365 }
2366 
2367 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2368 			i915_guc_log_level_get, i915_guc_log_level_set,
2369 			"%lld\n");
2370 
2371 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2372 {
2373 	struct drm_i915_private *dev_priv = inode->i_private;
2374 
2375 	if (!USES_GUC(dev_priv))
2376 		return -ENODEV;
2377 
2378 	file->private_data = &dev_priv->guc.log;
2379 
2380 	return intel_guc_log_relay_open(&dev_priv->guc.log);
2381 }
2382 
2383 static ssize_t
2384 i915_guc_log_relay_write(struct file *filp,
2385 			 const char __user *ubuf,
2386 			 size_t cnt,
2387 			 loff_t *ppos)
2388 {
2389 	struct intel_guc_log *log = filp->private_data;
2390 
2391 	intel_guc_log_relay_flush(log);
2392 
2393 	return cnt;
2394 }
2395 
2396 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2397 {
2398 	struct drm_i915_private *dev_priv = inode->i_private;
2399 
2400 	intel_guc_log_relay_close(&dev_priv->guc.log);
2401 
2402 	return 0;
2403 }
2404 
2405 static const struct file_operations i915_guc_log_relay_fops = {
2406 	.owner = THIS_MODULE,
2407 	.open = i915_guc_log_relay_open,
2408 	.write = i915_guc_log_relay_write,
2409 	.release = i915_guc_log_relay_release,
2410 };
2411 
2412 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2413 {
2414 	u8 val;
2415 	static const char * const sink_status[] = {
2416 		"inactive",
2417 		"transition to active, capture and display",
2418 		"active, display from RFB",
2419 		"active, capture and display on sink device timings",
2420 		"transition to inactive, capture and display, timing re-sync",
2421 		"reserved",
2422 		"reserved",
2423 		"sink internal error",
2424 	};
2425 	struct drm_connector *connector = m->private;
2426 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2427 	struct intel_dp *intel_dp =
2428 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2429 	int ret;
2430 
2431 	if (!CAN_PSR(dev_priv)) {
2432 		seq_puts(m, "PSR Unsupported\n");
2433 		return -ENODEV;
2434 	}
2435 
2436 	if (connector->status != connector_status_connected)
2437 		return -ENODEV;
2438 
2439 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2440 
2441 	if (ret == 1) {
2442 		const char *str = "unknown";
2443 
2444 		val &= DP_PSR_SINK_STATE_MASK;
2445 		if (val < ARRAY_SIZE(sink_status))
2446 			str = sink_status[val];
2447 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2448 	} else {
2449 		return ret;
2450 	}
2451 
2452 	return 0;
2453 }
2454 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2455 
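/*
 * Decode the live-status field of EDP_PSR_STATUS (or EDP_PSR2_STATUS when
 * PSR2 is in use) into a human-readable state name.
 */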
2456 static void
2457 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2458 {
2459 	u32 val, status_val;
2460 	const char *status = "unknown";
2461 
2462 	if (dev_priv->psr.psr2_enabled) {
2463 		static const char * const live_status[] = {
2464 			"IDLE",
2465 			"CAPTURE",
2466 			"CAPTURE_FS",
2467 			"SLEEP",
2468 			"BUFON_FW",
2469 			"ML_UP",
2470 			"SU_STANDBY",
2471 			"FAST_SLEEP",
2472 			"DEEP_SLEEP",
2473 			"BUF_ON",
2474 			"TG_ON"
2475 		};
2476 		val = I915_READ(EDP_PSR2_STATUS);
2477 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2478 			      EDP_PSR2_STATUS_STATE_SHIFT;
2479 		if (status_val < ARRAY_SIZE(live_status))
2480 			status = live_status[status_val];
2481 	} else {
2482 		static const char * const live_status[] = {
2483 			"IDLE",
2484 			"SRDONACK",
2485 			"SRDENT",
2486 			"BUFOFF",
2487 			"BUFON",
2488 			"AUXACK",
2489 			"SRDOFFACK",
2490 			"SRDENT_ON",
2491 		};
2492 		val = I915_READ(EDP_PSR_STATUS);
2493 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2494 			      EDP_PSR_STATUS_STATE_SHIFT;
2495 		if (status_val < ARRAY_SIZE(live_status))
2496 			status = live_status[status_val];
2497 	}
2498 
2499 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2500 }
2501 
2502 static int i915_edp_psr_status(struct seq_file *m, void *data)
2503 {
2504 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2505 	struct i915_psr *psr = &dev_priv->psr;
2506 	intel_wakeref_t wakeref;
2507 	const char *status;
2508 	bool enabled;
2509 	u32 val;
2510 
2511 	if (!HAS_PSR(dev_priv))
2512 		return -ENODEV;
2513 
2514 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2515 	if (psr->dp)
2516 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2517 	seq_puts(m, "\n");
2518 
2519 	if (!psr->sink_support)
2520 		return 0;
2521 
2522 	wakeref = intel_runtime_pm_get(dev_priv);
2523 	mutex_lock(&psr->lock);
2524 
2525 	if (psr->enabled)
2526 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2527 	else
2528 		status = "disabled";
2529 	seq_printf(m, "PSR mode: %s\n", status);
2530 
2531 	if (!psr->enabled)
2532 		goto unlock;
2533 
2534 	if (psr->psr2_enabled) {
2535 		val = I915_READ(EDP_PSR2_CTL);
2536 		enabled = val & EDP_PSR2_ENABLE;
2537 	} else {
2538 		val = I915_READ(EDP_PSR_CTL);
2539 		enabled = val & EDP_PSR_ENABLE;
2540 	}
2541 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2542 		   enableddisabled(enabled), val);
2543 	psr_source_status(dev_priv, m);
2544 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2545 		   psr->busy_frontbuffer_bits);
2546 
	/*
	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
	 */
2550 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2551 		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2552 		seq_printf(m, "Performance counter: %u\n", val);
2553 	}
2554 
2555 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
2556 		seq_printf(m, "Last attempted entry at: %lld\n",
2557 			   psr->last_entry_attempt);
2558 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2559 	}
2560 
2561 	if (psr->psr2_enabled) {
2562 		u32 su_frames_val[3];
2563 		int frame;
2564 
		/*
		 * Read all 3 registers beforehand to minimize the chance of
		 * crossing a frame boundary between the reads.
		 */
2569 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2570 			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2571 
2572 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2573 
2574 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2575 			u32 su_blocks;
2576 
2577 			su_blocks = su_frames_val[frame / 3] &
2578 				    PSR2_SU_STATUS_MASK(frame);
2579 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2580 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
2581 		}
2582 	}
2583 
2584 unlock:
2585 	mutex_unlock(&psr->lock);
2586 	intel_runtime_pm_put(dev_priv, wakeref);
2587 
2588 	return 0;
2589 }
2590 
2591 static int
2592 i915_edp_psr_debug_set(void *data, u64 val)
2593 {
2594 	struct drm_i915_private *dev_priv = data;
2595 	intel_wakeref_t wakeref;
2596 	int ret;
2597 
2598 	if (!CAN_PSR(dev_priv))
2599 		return -ENODEV;
2600 
2601 	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2602 
2603 	wakeref = intel_runtime_pm_get(dev_priv);
2604 
2605 	ret = intel_psr_debug_set(dev_priv, val);
2606 
2607 	intel_runtime_pm_put(dev_priv, wakeref);
2608 
2609 	return ret;
2610 }
2611 
2612 static int
2613 i915_edp_psr_debug_get(void *data, u64 *val)
2614 {
2615 	struct drm_i915_private *dev_priv = data;
2616 
2617 	if (!CAN_PSR(dev_priv))
2618 		return -ENODEV;
2619 
2620 	*val = READ_ONCE(dev_priv->psr.debug);
2621 	return 0;
2622 }
2623 
2624 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2625 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2626 			"%llu\n");
2627 
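/*
 * Report an energy estimate in microjoules: the MCH energy-status counter
 * scaled by the energy units advertised in MSR_RAPL_POWER_UNIT.
 */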
2628 static int i915_energy_uJ(struct seq_file *m, void *data)
2629 {
2630 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2631 	unsigned long long power;
2632 	intel_wakeref_t wakeref;
2633 	u32 units;
2634 
2635 	if (INTEL_GEN(dev_priv) < 6)
2636 		return -ENODEV;
2637 
2638 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2639 		return -ENODEV;
2640 
2641 	units = (power & 0x1f00) >> 8;
2642 	with_intel_runtime_pm(dev_priv, wakeref)
2643 		power = I915_READ(MCH_SECP_NRG_STTS);
2644 
2645 	power = (1000000 * power) >> units; /* convert to uJ */
2646 	seq_printf(m, "%llu", power);
2647 
2648 	return 0;
2649 }
2650 
2651 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2652 {
2653 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2654 	struct pci_dev *pdev = dev_priv->drm.pdev;
2655 
2656 	if (!HAS_RUNTIME_PM(dev_priv))
2657 		seq_puts(m, "Runtime power management not supported\n");
2658 
2659 	seq_printf(m, "Runtime power status: %s\n",
2660 		   enableddisabled(!dev_priv->power_domains.wakeref));
2661 
2662 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2663 	seq_printf(m, "IRQs disabled: %s\n",
2664 		   yesno(!intel_irqs_enabled(dev_priv)));
2665 #ifdef CONFIG_PM
2666 	seq_printf(m, "Usage count: %d\n",
2667 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2668 #else
	seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
2670 #endif
2671 	seq_printf(m, "PCI device power state: %s [%d]\n",
2672 		   pci_power_name(pdev->current_state),
2673 		   pdev->current_state);
2674 
2675 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2676 		struct drm_printer p = drm_seq_file_printer(m);
2677 
2678 		print_intel_runtime_pm_wakeref(dev_priv, &p);
2679 	}
2680 
2681 	return 0;
2682 }
2683 
2684 static int i915_power_domain_info(struct seq_file *m, void *unused)
2685 {
2686 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2687 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2688 	int i;
2689 
2690 	mutex_lock(&power_domains->lock);
2691 
2692 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2693 	for (i = 0; i < power_domains->power_well_count; i++) {
2694 		struct i915_power_well *power_well;
2695 		enum intel_display_power_domain power_domain;
2696 
2697 		power_well = &power_domains->power_wells[i];
2698 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
2699 			   power_well->count);
2700 
2701 		for_each_power_domain(power_domain, power_well->desc->domains)
2702 			seq_printf(m, "  %-23s %d\n",
2703 				 intel_display_power_domain_str(power_domain),
2704 				 power_domains->domain_use_count[power_domain]);
2705 	}
2706 
2707 	mutex_unlock(&power_domains->lock);
2708 
2709 	return 0;
2710 }
2711 
2712 static int i915_dmc_info(struct seq_file *m, void *unused)
2713 {
2714 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2715 	intel_wakeref_t wakeref;
2716 	struct intel_csr *csr;
2717 
2718 	if (!HAS_CSR(dev_priv))
2719 		return -ENODEV;
2720 
2721 	csr = &dev_priv->csr;
2722 
2723 	wakeref = intel_runtime_pm_get(dev_priv);
2724 
2725 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2726 	seq_printf(m, "path: %s\n", csr->fw_path);
2727 
2728 	if (!csr->dmc_payload)
2729 		goto out;
2730 
2731 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2732 		   CSR_VERSION_MINOR(csr->version));
2733 
2734 	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2735 		goto out;
2736 
2737 	seq_printf(m, "DC3 -> DC5 count: %d\n",
2738 		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2739 						    SKL_CSR_DC3_DC5_COUNT));
2740 	if (!IS_GEN9_LP(dev_priv))
2741 		seq_printf(m, "DC5 -> DC6 count: %d\n",
2742 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2743 
2744 out:
2745 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2746 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2747 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2748 
2749 	intel_runtime_pm_put(dev_priv, wakeref);
2750 
2751 	return 0;
2752 }
2753 
2754 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2755 				 struct drm_display_mode *mode)
2756 {
2757 	int i;
2758 
2759 	for (i = 0; i < tabs; i++)
2760 		seq_putc(m, '\t');
2761 
2762 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2763 }
2764 
2765 static void intel_encoder_info(struct seq_file *m,
2766 			       struct intel_crtc *intel_crtc,
2767 			       struct intel_encoder *intel_encoder)
2768 {
2769 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2770 	struct drm_device *dev = &dev_priv->drm;
2771 	struct drm_crtc *crtc = &intel_crtc->base;
2772 	struct intel_connector *intel_connector;
2773 	struct drm_encoder *encoder;
2774 
2775 	encoder = &intel_encoder->base;
2776 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2777 		   encoder->base.id, encoder->name);
2778 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2779 		struct drm_connector *connector = &intel_connector->base;
2780 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2781 			   connector->base.id,
2782 			   connector->name,
2783 			   drm_get_connector_status_name(connector->status));
2784 		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;

			seq_puts(m, ", mode:\n");
2787 			intel_seq_print_mode(m, 2, mode);
2788 		} else {
2789 			seq_putc(m, '\n');
2790 		}
2791 	}
2792 }
2793 
2794 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2795 {
2796 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2797 	struct drm_device *dev = &dev_priv->drm;
2798 	struct drm_crtc *crtc = &intel_crtc->base;
2799 	struct intel_encoder *intel_encoder;
2800 	struct drm_plane_state *plane_state = crtc->primary->state;
2801 	struct drm_framebuffer *fb = plane_state->fb;
2802 
2803 	if (fb)
2804 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2805 			   fb->base.id, plane_state->src_x >> 16,
2806 			   plane_state->src_y >> 16, fb->width, fb->height);
2807 	else
2808 		seq_puts(m, "\tprimary plane disabled\n");
2809 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2810 		intel_encoder_info(m, intel_crtc, intel_encoder);
2811 }
2812 
2813 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2814 {
2815 	struct drm_display_mode *mode = panel->fixed_mode;
2816 
	seq_puts(m, "\tfixed mode:\n");
2818 	intel_seq_print_mode(m, 2, mode);
2819 }
2820 
2821 static void intel_dp_info(struct seq_file *m,
2822 			  struct intel_connector *intel_connector)
2823 {
2824 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2825 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2826 
2827 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2828 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2829 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2830 		intel_panel_info(m, &intel_connector->panel);
2831 
2832 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2833 				&intel_dp->aux);
2834 }
2835 
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
2838 {
2839 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2840 	struct intel_dp_mst_encoder *intel_mst =
2841 		enc_to_mst(&intel_encoder->base);
2842 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
2843 	struct intel_dp *intel_dp = &intel_dig_port->dp;
2844 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2845 					intel_connector->port);
2846 
2847 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2848 }
2849 
2850 static void intel_hdmi_info(struct seq_file *m,
2851 			    struct intel_connector *intel_connector)
2852 {
2853 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2854 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2855 
2856 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2857 }
2858 
2859 static void intel_lvds_info(struct seq_file *m,
2860 			    struct intel_connector *intel_connector)
2861 {
2862 	intel_panel_info(m, &intel_connector->panel);
2863 }
2864 
2865 static void intel_connector_info(struct seq_file *m,
2866 				 struct drm_connector *connector)
2867 {
2868 	struct intel_connector *intel_connector = to_intel_connector(connector);
2869 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2870 	struct drm_display_mode *mode;
2871 
2872 	seq_printf(m, "connector %d: type %s, status: %s\n",
2873 		   connector->base.id, connector->name,
2874 		   drm_get_connector_status_name(connector->status));
2875 
2876 	if (connector->status == connector_status_disconnected)
2877 		return;
2878 
2879 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2880 		   connector->display_info.width_mm,
2881 		   connector->display_info.height_mm);
2882 	seq_printf(m, "\tsubpixel order: %s\n",
2883 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2884 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2885 
2886 	if (!intel_encoder)
2887 		return;
2888 
2889 	switch (connector->connector_type) {
2890 	case DRM_MODE_CONNECTOR_DisplayPort:
2891 	case DRM_MODE_CONNECTOR_eDP:
2892 		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2893 			intel_dp_mst_info(m, intel_connector);
2894 		else
2895 			intel_dp_info(m, intel_connector);
2896 		break;
2897 	case DRM_MODE_CONNECTOR_LVDS:
2898 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2899 			intel_lvds_info(m, intel_connector);
2900 		break;
2901 	case DRM_MODE_CONNECTOR_HDMIA:
2902 		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2903 		    intel_encoder->type == INTEL_OUTPUT_DDI)
2904 			intel_hdmi_info(m, intel_connector);
2905 		break;
2906 	default:
2907 		break;
2908 	}
2909 
	seq_puts(m, "\tmodes:\n");
2911 	list_for_each_entry(mode, &connector->modes, head)
2912 		intel_seq_print_mode(m, 2, mode);
2913 }
2914 
2915 static const char *plane_type(enum drm_plane_type type)
2916 {
2917 	switch (type) {
2918 	case DRM_PLANE_TYPE_OVERLAY:
2919 		return "OVL";
2920 	case DRM_PLANE_TYPE_PRIMARY:
2921 		return "PRI";
2922 	case DRM_PLANE_TYPE_CURSOR:
2923 		return "CUR";
2924 	/*
2925 	 * Deliberately omitting default: to generate compiler warnings
2926 	 * when a new drm_plane_type gets added.
2927 	 */
2928 	}
2929 
2930 	return "unknown";
2931 }
2932 
2933 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2934 {
2935 	/*
	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
	 * but print them all to make any misuse visible.
2938 	 */
2939 	snprintf(buf, bufsize,
2940 		 "%s%s%s%s%s%s(0x%08x)",
2941 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2942 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2943 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2944 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2945 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2946 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2947 		 rotation);
2948 }
2949 
2950 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2951 {
2952 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2953 	struct drm_device *dev = &dev_priv->drm;
2954 	struct intel_plane *intel_plane;
2955 
2956 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2957 		struct drm_plane_state *state;
2958 		struct drm_plane *plane = &intel_plane->base;
2959 		struct drm_format_name_buf format_name;
2960 		char rot_str[48];
2961 
2962 		if (!plane->state) {
2963 			seq_puts(m, "plane->state is NULL!\n");
2964 			continue;
2965 		}
2966 
2967 		state = plane->state;
2968 
2969 		if (state->fb) {
2970 			drm_get_format_name(state->fb->format->format,
2971 					    &format_name);
2972 		} else {
2973 			sprintf(format_name.str, "N/A");
2974 		}
2975 
2976 		plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2977 
2978 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2979 			   plane->base.id,
2980 			   plane_type(intel_plane->base.type),
2981 			   state->crtc_x, state->crtc_y,
2982 			   state->crtc_w, state->crtc_h,
2983 			   (state->src_x >> 16),
2984 			   ((state->src_x & 0xffff) * 15625) >> 10,
2985 			   (state->src_y >> 16),
2986 			   ((state->src_y & 0xffff) * 15625) >> 10,
2987 			   (state->src_w >> 16),
2988 			   ((state->src_w & 0xffff) * 15625) >> 10,
2989 			   (state->src_h >> 16),
2990 			   ((state->src_h & 0xffff) * 15625) >> 10,
2991 			   format_name.str,
2992 			   rot_str);
2993 	}
2994 }
2995 
2996 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2997 {
2998 	struct intel_crtc_state *pipe_config;
2999 	int num_scalers = intel_crtc->num_scalers;
3000 	int i;
3001 
3002 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3003 
	/* Not all platforms have a scaler */
3005 	if (num_scalers) {
3006 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3007 			   num_scalers,
3008 			   pipe_config->scaler_state.scaler_users,
3009 			   pipe_config->scaler_state.scaler_id);
3010 
3011 		for (i = 0; i < num_scalers; i++) {
3012 			struct intel_scaler *sc =
3013 					&pipe_config->scaler_state.scalers[i];
3014 
3015 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3016 				   i, yesno(sc->in_use), sc->mode);
3017 		}
3018 		seq_puts(m, "\n");
3019 	} else {
3020 		seq_puts(m, "\tNo scalers available on this platform\n");
3021 	}
3022 }
3023 
3024 static int i915_display_info(struct seq_file *m, void *unused)
3025 {
3026 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3027 	struct drm_device *dev = &dev_priv->drm;
3028 	struct intel_crtc *crtc;
3029 	struct drm_connector *connector;
3030 	struct drm_connector_list_iter conn_iter;
3031 	intel_wakeref_t wakeref;
3032 
3033 	wakeref = intel_runtime_pm_get(dev_priv);
3034 
	seq_puts(m, "CRTC info\n");
	seq_puts(m, "---------\n");
3037 	for_each_intel_crtc(dev, crtc) {
3038 		struct intel_crtc_state *pipe_config;
3039 
3040 		drm_modeset_lock(&crtc->base.mutex, NULL);
3041 		pipe_config = to_intel_crtc_state(crtc->base.state);
3042 
3043 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3044 			   crtc->base.base.id, pipe_name(crtc->pipe),
3045 			   yesno(pipe_config->base.active),
3046 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3047 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3048 
3049 		if (pipe_config->base.active) {
3050 			struct intel_plane *cursor =
3051 				to_intel_plane(crtc->base.cursor);
3052 
3053 			intel_crtc_info(m, crtc);
3054 
3055 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3056 				   yesno(cursor->base.state->visible),
3057 				   cursor->base.state->crtc_x,
3058 				   cursor->base.state->crtc_y,
3059 				   cursor->base.state->crtc_w,
3060 				   cursor->base.state->crtc_h,
3061 				   cursor->cursor.base);
3062 			intel_scaler_info(m, crtc);
3063 			intel_plane_info(m, crtc);
3064 		}
3065 
		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
3067 			   yesno(!crtc->cpu_fifo_underrun_disabled),
3068 			   yesno(!crtc->pch_fifo_underrun_disabled));
3069 		drm_modeset_unlock(&crtc->base.mutex);
3070 	}
3071 
	seq_putc(m, '\n');
	seq_puts(m, "Connector info\n");
	seq_puts(m, "--------------\n");
3075 	mutex_lock(&dev->mode_config.mutex);
3076 	drm_connector_list_iter_begin(dev, &conn_iter);
3077 	drm_for_each_connector_iter(connector, &conn_iter)
3078 		intel_connector_info(m, connector);
3079 	drm_connector_list_iter_end(&conn_iter);
3080 	mutex_unlock(&dev->mode_config.mutex);
3081 
3082 	intel_runtime_pm_put(dev_priv, wakeref);
3083 
3084 	return 0;
3085 }
3086 
3087 static int i915_engine_info(struct seq_file *m, void *unused)
3088 {
3089 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3090 	struct intel_engine_cs *engine;
3091 	intel_wakeref_t wakeref;
3092 	enum intel_engine_id id;
3093 	struct drm_printer p;
3094 
3095 	wakeref = intel_runtime_pm_get(dev_priv);
3096 
3097 	seq_printf(m, "GT awake? %s [%d]\n",
3098 		   yesno(dev_priv->gt.awake),
3099 		   atomic_read(&dev_priv->gt.wakeref.count));
3100 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
3101 		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
3102 
3103 	p = drm_seq_file_printer(m);
3104 	for_each_engine(engine, dev_priv, id)
3105 		intel_engine_dump(engine, &p, "%s\n", engine->name);
3106 
3107 	intel_runtime_pm_put(dev_priv, wakeref);
3108 
3109 	return 0;
3110 }
3111 
3112 static int i915_rcs_topology(struct seq_file *m, void *unused)
3113 {
3114 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3115 	struct drm_printer p = drm_seq_file_printer(m);
3116 
3117 	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
3118 
3119 	return 0;
3120 }
3121 
3122 static int i915_shrinker_info(struct seq_file *m, void *unused)
3123 {
3124 	struct drm_i915_private *i915 = node_to_i915(m->private);
3125 
3126 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3127 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3128 
3129 	return 0;
3130 }
3131 
3132 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3133 {
3134 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3135 	struct drm_device *dev = &dev_priv->drm;
3136 	int i;
3137 
3138 	drm_modeset_lock_all(dev);
3139 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3140 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3141 
3142 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3143 			   pll->info->id);
3144 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3145 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_puts(m, " tracked hardware state:\n");
3147 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3148 		seq_printf(m, " dpll_md: 0x%08x\n",
3149 			   pll->state.hw_state.dpll_md);
3150 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3151 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3152 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3153 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3154 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3155 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3156 			   pll->state.hw_state.mg_refclkin_ctl);
3157 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3158 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
3159 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3160 			   pll->state.hw_state.mg_clktop2_hsclkctl);
3161 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
3162 			   pll->state.hw_state.mg_pll_div0);
3163 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
3164 			   pll->state.hw_state.mg_pll_div1);
3165 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
3166 			   pll->state.hw_state.mg_pll_lf);
3167 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3168 			   pll->state.hw_state.mg_pll_frac_lock);
3169 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3170 			   pll->state.hw_state.mg_pll_ssc);
3171 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
3172 			   pll->state.hw_state.mg_pll_bias);
3173 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3174 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
3175 	}
3176 	drm_modeset_unlock_all(dev);
3177 
3178 	return 0;
3179 }
3180 
3181 static int i915_wa_registers(struct seq_file *m, void *unused)
3182 {
3183 	struct drm_i915_private *i915 = node_to_i915(m->private);
3184 	const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
3185 	struct i915_wa *wa;
3186 	unsigned int i;
3187 
3188 	seq_printf(m, "Workarounds applied: %u\n", wal->count);
3189 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
3190 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3191 			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
3192 
3193 	return 0;
3194 }
3195 
3196 static int i915_ipc_status_show(struct seq_file *m, void *data)
3197 {
3198 	struct drm_i915_private *dev_priv = m->private;
3199 
3200 	seq_printf(m, "Isochronous Priority Control: %s\n",
3201 			yesno(dev_priv->ipc_enabled));
3202 	return 0;
3203 }
3204 
3205 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3206 {
3207 	struct drm_i915_private *dev_priv = inode->i_private;
3208 
3209 	if (!HAS_IPC(dev_priv))
3210 		return -ENODEV;
3211 
3212 	return single_open(file, i915_ipc_status_show, dev_priv);
3213 }
3214 
3215 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3216 				     size_t len, loff_t *offp)
3217 {
3218 	struct seq_file *m = file->private_data;
3219 	struct drm_i915_private *dev_priv = m->private;
3220 	intel_wakeref_t wakeref;
3221 	bool enable;
3222 	int ret;
3223 
3224 	ret = kstrtobool_from_user(ubuf, len, &enable);
3225 	if (ret < 0)
3226 		return ret;
3227 
3228 	with_intel_runtime_pm(dev_priv, wakeref) {
3229 		if (!dev_priv->ipc_enabled && enable)
			DRM_INFO("Enabling IPC: watermarks will only be correct after the next commit\n");
3231 		dev_priv->wm.distrust_bios_wm = true;
3232 		dev_priv->ipc_enabled = enable;
3233 		intel_enable_ipc(dev_priv);
3234 	}
3235 
3236 	return len;
3237 }
3238 
3239 static const struct file_operations i915_ipc_status_fops = {
3240 	.owner = THIS_MODULE,
3241 	.open = i915_ipc_status_open,
3242 	.read = seq_read,
3243 	.llseek = seq_lseek,
3244 	.release = single_release,
3245 	.write = i915_ipc_status_write
3246 };
3247 
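/*
 * gen9+: print the display data buffer (DDB) allocation, i.e. the start,
 * end and size of the DDB slice assigned to each plane, per pipe.
 */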
3248 static int i915_ddb_info(struct seq_file *m, void *unused)
3249 {
3250 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3251 	struct drm_device *dev = &dev_priv->drm;
3252 	struct skl_ddb_entry *entry;
3253 	struct intel_crtc *crtc;
3254 
3255 	if (INTEL_GEN(dev_priv) < 9)
3256 		return -ENODEV;
3257 
3258 	drm_modeset_lock_all(dev);
3259 
3260 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3261 
3262 	for_each_intel_crtc(&dev_priv->drm, crtc) {
3263 		struct intel_crtc_state *crtc_state =
3264 			to_intel_crtc_state(crtc->base.state);
3265 		enum pipe pipe = crtc->pipe;
3266 		enum plane_id plane_id;
3267 
3268 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3269 
3270 		for_each_plane_id_on_crtc(crtc, plane_id) {
3271 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3272 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
3273 				   entry->start, entry->end,
3274 				   skl_ddb_entry_size(entry));
3275 		}
3276 
3277 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
3278 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3279 			   entry->end, skl_ddb_entry_size(entry));
3280 	}
3281 
3282 	drm_modeset_unlock_all(dev);
3283 
3284 	return 0;
3285 }
3286 
3287 static void drrs_status_per_crtc(struct seq_file *m,
3288 				 struct drm_device *dev,
3289 				 struct intel_crtc *intel_crtc)
3290 {
3291 	struct drm_i915_private *dev_priv = to_i915(dev);
3292 	struct i915_drrs *drrs = &dev_priv->drrs;
3293 	int vrefresh = 0;
3294 	struct drm_connector *connector;
3295 	struct drm_connector_list_iter conn_iter;
3296 
3297 	drm_connector_list_iter_begin(dev, &conn_iter);
3298 	drm_for_each_connector_iter(connector, &conn_iter) {
3299 		if (connector->state->crtc != &intel_crtc->base)
3300 			continue;
3301 
3302 		seq_printf(m, "%s:\n", connector->name);
3303 	}
3304 	drm_connector_list_iter_end(&conn_iter);
3305 
3306 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3307 		seq_puts(m, "\tVBT: DRRS_type: Static");
3308 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3309 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3310 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3311 		seq_puts(m, "\tVBT: DRRS_type: None");
3312 	else
3313 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3314 
3315 	seq_puts(m, "\n\n");
3316 
3317 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3318 		struct intel_panel *panel;
3319 
3320 		mutex_lock(&drrs->mutex);
3321 		/* DRRS Supported */
3322 		seq_puts(m, "\tDRRS Supported: Yes\n");
3323 
3324 		/* disable_drrs() will make drrs->dp NULL */
3325 		if (!drrs->dp) {
3326 			seq_puts(m, "Idleness DRRS: Disabled\n");
3327 			if (dev_priv->psr.enabled)
				seq_puts(m,
					 "\tAs PSR is enabled, DRRS is not enabled\n");
3330 			mutex_unlock(&drrs->mutex);
3331 			return;
3332 		}
3333 
3334 		panel = &drrs->dp->attached_connector->panel;
3335 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);
3337 
3338 		seq_puts(m, "\n\t\t");
3339 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3340 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3341 			vrefresh = panel->fixed_mode->vrefresh;
3342 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3343 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3344 			vrefresh = panel->downclock_mode->vrefresh;
3345 		} else {
3346 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
3348 			mutex_unlock(&drrs->mutex);
3349 			return;
3350 		}
3351 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3352 
3353 		seq_puts(m, "\n\t\t");
3354 		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter. */
		seq_puts(m, "\tDRRS Supported: No");
	}
3359 	seq_puts(m, "\n");
3360 }
3361 
3362 static int i915_drrs_status(struct seq_file *m, void *unused)
3363 {
3364 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3365 	struct drm_device *dev = &dev_priv->drm;
3366 	struct intel_crtc *intel_crtc;
3367 	int active_crtc_cnt = 0;
3368 
3369 	drm_modeset_lock_all(dev);
3370 	for_each_intel_crtc(dev, intel_crtc) {
3371 		if (intel_crtc->base.state->active) {
3372 			active_crtc_cnt++;
3373 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3374 
3375 			drrs_status_per_crtc(m, dev, intel_crtc);
3376 		}
3377 	}
3378 	drm_modeset_unlock_all(dev);
3379 
3380 	if (!active_crtc_cnt)
3381 		seq_puts(m, "No active crtc found\n");
3382 
3383 	return 0;
3384 }
3385 
3386 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3387 {
3388 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3389 	struct drm_device *dev = &dev_priv->drm;
3390 	struct intel_encoder *intel_encoder;
3391 	struct intel_digital_port *intel_dig_port;
3392 	struct drm_connector *connector;
3393 	struct drm_connector_list_iter conn_iter;
3394 
3395 	drm_connector_list_iter_begin(dev, &conn_iter);
3396 	drm_for_each_connector_iter(connector, &conn_iter) {
3397 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3398 			continue;
3399 
3400 		intel_encoder = intel_attached_encoder(connector);
3401 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3402 			continue;
3403 
3404 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3405 		if (!intel_dig_port->dp.can_mst)
3406 			continue;
3407 
3408 		seq_printf(m, "MST Source Port %c\n",
3409 			   port_name(intel_dig_port->base.port));
3410 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3411 	}
3412 	drm_connector_list_iter_end(&conn_iter);
3413 
3414 	return 0;
3415 }
3416 
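/*
 * DP compliance testing: writing 1 arms the compliance test handler on the
 * connected DisplayPort connectors; the show side reports whether a test is
 * currently armed.
 */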
3417 static ssize_t i915_displayport_test_active_write(struct file *file,
3418 						  const char __user *ubuf,
3419 						  size_t len, loff_t *offp)
3420 {
3421 	char *input_buffer;
3422 	int status = 0;
3423 	struct drm_device *dev;
3424 	struct drm_connector *connector;
3425 	struct drm_connector_list_iter conn_iter;
3426 	struct intel_dp *intel_dp;
3427 	int val = 0;
3428 
3429 	dev = ((struct seq_file *)file->private_data)->private;
3430 
3431 	if (len == 0)
3432 		return 0;
3433 
3434 	input_buffer = memdup_user_nul(ubuf, len);
3435 	if (IS_ERR(input_buffer))
3436 		return PTR_ERR(input_buffer);
3437 
3438 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3439 
3440 	drm_connector_list_iter_begin(dev, &conn_iter);
3441 	drm_for_each_connector_iter(connector, &conn_iter) {
3442 		struct intel_encoder *encoder;
3443 
3444 		if (connector->connector_type !=
3445 		    DRM_MODE_CONNECTOR_DisplayPort)
3446 			continue;
3447 
3448 		encoder = to_intel_encoder(connector->encoder);
3449 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3450 			continue;
3451 
3452 		if (encoder && connector->status == connector_status_connected) {
3453 			intel_dp = enc_to_intel_dp(&encoder->base);
3454 			status = kstrtoint(input_buffer, 10, &val);
3455 			if (status < 0)
3456 				break;
3457 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3458 			/* To prevent erroneous activation of the compliance
3459 			 * testing code, only accept an actual value of 1 here.
3460 			 */
3461 			if (val == 1)
3462 				intel_dp->compliance.test_active = 1;
3463 			else
3464 				intel_dp->compliance.test_active = 0;
3465 		}
3466 	}
3467 	drm_connector_list_iter_end(&conn_iter);
3468 	kfree(input_buffer);
3469 	if (status < 0)
3470 		return status;
3471 
3472 	*offp += len;
3473 	return len;
3474 }
3475 
3476 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3477 {
3478 	struct drm_i915_private *dev_priv = m->private;
3479 	struct drm_device *dev = &dev_priv->drm;
3480 	struct drm_connector *connector;
3481 	struct drm_connector_list_iter conn_iter;
3482 	struct intel_dp *intel_dp;
3483 
3484 	drm_connector_list_iter_begin(dev, &conn_iter);
3485 	drm_for_each_connector_iter(connector, &conn_iter) {
3486 		struct intel_encoder *encoder;
3487 
3488 		if (connector->connector_type !=
3489 		    DRM_MODE_CONNECTOR_DisplayPort)
3490 			continue;
3491 
3492 		encoder = to_intel_encoder(connector->encoder);
3493 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3494 			continue;
3495 
3496 		if (encoder && connector->status == connector_status_connected) {
3497 			intel_dp = enc_to_intel_dp(&encoder->base);
3498 			if (intel_dp->compliance.test_active)
3499 				seq_puts(m, "1");
3500 			else
3501 				seq_puts(m, "0");
3502 		} else
3503 			seq_puts(m, "0");
3504 	}
3505 	drm_connector_list_iter_end(&conn_iter);
3506 
3507 	return 0;
3508 }
3509 
3510 static int i915_displayport_test_active_open(struct inode *inode,
3511 					     struct file *file)
3512 {
3513 	return single_open(file, i915_displayport_test_active_show,
3514 			   inode->i_private);
3515 }
3516 
3517 static const struct file_operations i915_displayport_test_active_fops = {
3518 	.owner = THIS_MODULE,
3519 	.open = i915_displayport_test_active_open,
3520 	.read = seq_read,
3521 	.llseek = seq_lseek,
3522 	.release = single_release,
3523 	.write = i915_displayport_test_active_write
3524 };
3525 
3526 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3527 {
3528 	struct drm_i915_private *dev_priv = m->private;
3529 	struct drm_device *dev = &dev_priv->drm;
3530 	struct drm_connector *connector;
3531 	struct drm_connector_list_iter conn_iter;
3532 	struct intel_dp *intel_dp;
3533 
3534 	drm_connector_list_iter_begin(dev, &conn_iter);
3535 	drm_for_each_connector_iter(connector, &conn_iter) {
3536 		struct intel_encoder *encoder;
3537 
3538 		if (connector->connector_type !=
3539 		    DRM_MODE_CONNECTOR_DisplayPort)
3540 			continue;
3541 
3542 		encoder = to_intel_encoder(connector->encoder);
3543 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3544 			continue;
3545 
3546 		if (encoder && connector->status == connector_status_connected) {
3547 			intel_dp = enc_to_intel_dp(&encoder->base);
3548 			if (intel_dp->compliance.test_type ==
3549 			    DP_TEST_LINK_EDID_READ)
3550 				seq_printf(m, "%lx",
3551 					   intel_dp->compliance.test_data.edid);
3552 			else if (intel_dp->compliance.test_type ==
3553 				 DP_TEST_LINK_VIDEO_PATTERN) {
3554 				seq_printf(m, "hdisplay: %d\n",
3555 					   intel_dp->compliance.test_data.hdisplay);
3556 				seq_printf(m, "vdisplay: %d\n",
3557 					   intel_dp->compliance.test_data.vdisplay);
3558 				seq_printf(m, "bpc: %u\n",
3559 					   intel_dp->compliance.test_data.bpc);
3560 			}
3561 		} else
3562 			seq_puts(m, "0");
3563 	}
3564 	drm_connector_list_iter_end(&conn_iter);
3565 
3566 	return 0;
3567 }
3568 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3569 
3570 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3571 {
3572 	struct drm_i915_private *dev_priv = m->private;
3573 	struct drm_device *dev = &dev_priv->drm;
3574 	struct drm_connector *connector;
3575 	struct drm_connector_list_iter conn_iter;
3576 	struct intel_dp *intel_dp;
3577 
3578 	drm_connector_list_iter_begin(dev, &conn_iter);
3579 	drm_for_each_connector_iter(connector, &conn_iter) {
3580 		struct intel_encoder *encoder;
3581 
3582 		if (connector->connector_type !=
3583 		    DRM_MODE_CONNECTOR_DisplayPort)
3584 			continue;
3585 
3586 		encoder = to_intel_encoder(connector->encoder);
3587 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3588 			continue;
3589 
3590 		if (encoder && connector->status == connector_status_connected) {
3591 			intel_dp = enc_to_intel_dp(&encoder->base);
3592 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3593 		} else
3594 			seq_puts(m, "0");
3595 	}
3596 	drm_connector_list_iter_end(&conn_iter);
3597 
3598 	return 0;
3599 }
3600 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3601 
3602 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3603 {
3604 	struct drm_i915_private *dev_priv = m->private;
3605 	struct drm_device *dev = &dev_priv->drm;
3606 	int level;
3607 	int num_levels;
3608 
3609 	if (IS_CHERRYVIEW(dev_priv))
3610 		num_levels = 3;
3611 	else if (IS_VALLEYVIEW(dev_priv))
3612 		num_levels = 1;
3613 	else if (IS_G4X(dev_priv))
3614 		num_levels = 3;
3615 	else
3616 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3617 
3618 	drm_modeset_lock_all(dev);
3619 
3620 	for (level = 0; level < num_levels; level++) {
3621 		unsigned int latency = wm[level];
3622 
3623 		/*
3624 		 * - WM1+ latency values in 0.5us units
3625 		 * - latencies are in us on gen9/vlv/chv
3626 		 */
3627 		if (INTEL_GEN(dev_priv) >= 9 ||
3628 		    IS_VALLEYVIEW(dev_priv) ||
3629 		    IS_CHERRYVIEW(dev_priv) ||
3630 		    IS_G4X(dev_priv))
3631 			latency *= 10;
3632 		else if (level > 0)
3633 			latency *= 5;
3634 
3635 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3636 			   level, wm[level], latency / 10, latency % 10);
3637 	}
3638 
3639 	drm_modeset_unlock_all(dev);
3640 }
3641 
3642 static int pri_wm_latency_show(struct seq_file *m, void *data)
3643 {
3644 	struct drm_i915_private *dev_priv = m->private;
3645 	const u16 *latencies;
3646 
3647 	if (INTEL_GEN(dev_priv) >= 9)
3648 		latencies = dev_priv->wm.skl_latency;
3649 	else
3650 		latencies = dev_priv->wm.pri_latency;
3651 
3652 	wm_latency_show(m, latencies);
3653 
3654 	return 0;
3655 }
3656 
3657 static int spr_wm_latency_show(struct seq_file *m, void *data)
3658 {
3659 	struct drm_i915_private *dev_priv = m->private;
3660 	const u16 *latencies;
3661 
3662 	if (INTEL_GEN(dev_priv) >= 9)
3663 		latencies = dev_priv->wm.skl_latency;
3664 	else
3665 		latencies = dev_priv->wm.spr_latency;
3666 
3667 	wm_latency_show(m, latencies);
3668 
3669 	return 0;
3670 }
3671 
3672 static int cur_wm_latency_show(struct seq_file *m, void *data)
3673 {
3674 	struct drm_i915_private *dev_priv = m->private;
3675 	const u16 *latencies;
3676 
3677 	if (INTEL_GEN(dev_priv) >= 9)
3678 		latencies = dev_priv->wm.skl_latency;
3679 	else
3680 		latencies = dev_priv->wm.cur_latency;
3681 
3682 	wm_latency_show(m, latencies);
3683 
3684 	return 0;
3685 }
3686 
3687 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3688 {
3689 	struct drm_i915_private *dev_priv = inode->i_private;
3690 
3691 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3692 		return -ENODEV;
3693 
3694 	return single_open(file, pri_wm_latency_show, dev_priv);
3695 }
3696 
3697 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3698 {
3699 	struct drm_i915_private *dev_priv = inode->i_private;
3700 
3701 	if (HAS_GMCH(dev_priv))
3702 		return -ENODEV;
3703 
3704 	return single_open(file, spr_wm_latency_show, dev_priv);
3705 }
3706 
3707 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3708 {
3709 	struct drm_i915_private *dev_priv = inode->i_private;
3710 
3711 	if (HAS_GMCH(dev_priv))
3712 		return -ENODEV;
3713 
3714 	return single_open(file, cur_wm_latency_show, dev_priv);
3715 }
3716 
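/*
 * The wm latency files below take up to eight space-separated watermark
 * latency values; the write is rejected with -EINVAL unless the count
 * matches the platform's number of levels exactly. An illustrative sketch
 * (values are examples, output line shown as on a gen9 part, paths assume
 * debugfs at /sys/kernel/debug and DRM minor 0):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *   WM0 2 (2.0 usec)
 *   ...
 *   # echo "2 4 8 16 32" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */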
3717 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3718 				size_t len, loff_t *offp, u16 wm[8])
3719 {
3720 	struct seq_file *m = file->private_data;
3721 	struct drm_i915_private *dev_priv = m->private;
3722 	struct drm_device *dev = &dev_priv->drm;
3723 	u16 new[8] = { 0 };
3724 	int num_levels;
3725 	int level;
3726 	int ret;
3727 	char tmp[32];
3728 
3729 	if (IS_CHERRYVIEW(dev_priv))
3730 		num_levels = 3;
3731 	else if (IS_VALLEYVIEW(dev_priv))
3732 		num_levels = 1;
3733 	else if (IS_G4X(dev_priv))
3734 		num_levels = 3;
3735 	else
3736 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3737 
3738 	if (len >= sizeof(tmp))
3739 		return -EINVAL;
3740 
3741 	if (copy_from_user(tmp, ubuf, len))
3742 		return -EFAULT;
3743 
3744 	tmp[len] = '\0';
3745 
3746 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3747 		     &new[0], &new[1], &new[2], &new[3],
3748 		     &new[4], &new[5], &new[6], &new[7]);
3749 	if (ret != num_levels)
3750 		return -EINVAL;
3751 
3752 	drm_modeset_lock_all(dev);
3753 
3754 	for (level = 0; level < num_levels; level++)
3755 		wm[level] = new[level];
3756 
3757 	drm_modeset_unlock_all(dev);
3758 
3759 	return len;
3760 }
3761 
3762 
3763 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3764 				    size_t len, loff_t *offp)
3765 {
3766 	struct seq_file *m = file->private_data;
3767 	struct drm_i915_private *dev_priv = m->private;
3768 	u16 *latencies;
3769 
3770 	if (INTEL_GEN(dev_priv) >= 9)
3771 		latencies = dev_priv->wm.skl_latency;
3772 	else
3773 		latencies = dev_priv->wm.pri_latency;
3774 
3775 	return wm_latency_write(file, ubuf, len, offp, latencies);
3776 }
3777 
3778 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3779 				    size_t len, loff_t *offp)
3780 {
3781 	struct seq_file *m = file->private_data;
3782 	struct drm_i915_private *dev_priv = m->private;
3783 	u16 *latencies;
3784 
3785 	if (INTEL_GEN(dev_priv) >= 9)
3786 		latencies = dev_priv->wm.skl_latency;
3787 	else
3788 		latencies = dev_priv->wm.spr_latency;
3789 
3790 	return wm_latency_write(file, ubuf, len, offp, latencies);
3791 }
3792 
3793 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3794 				    size_t len, loff_t *offp)
3795 {
3796 	struct seq_file *m = file->private_data;
3797 	struct drm_i915_private *dev_priv = m->private;
3798 	u16 *latencies;
3799 
3800 	if (INTEL_GEN(dev_priv) >= 9)
3801 		latencies = dev_priv->wm.skl_latency;
3802 	else
3803 		latencies = dev_priv->wm.cur_latency;
3804 
3805 	return wm_latency_write(file, ubuf, len, offp, latencies);
3806 }
3807 
3808 static const struct file_operations i915_pri_wm_latency_fops = {
3809 	.owner = THIS_MODULE,
3810 	.open = pri_wm_latency_open,
3811 	.read = seq_read,
3812 	.llseek = seq_lseek,
3813 	.release = single_release,
3814 	.write = pri_wm_latency_write
3815 };
3816 
3817 static const struct file_operations i915_spr_wm_latency_fops = {
3818 	.owner = THIS_MODULE,
3819 	.open = spr_wm_latency_open,
3820 	.read = seq_read,
3821 	.llseek = seq_lseek,
3822 	.release = single_release,
3823 	.write = spr_wm_latency_write
3824 };
3825 
3826 static const struct file_operations i915_cur_wm_latency_fops = {
3827 	.owner = THIS_MODULE,
3828 	.open = cur_wm_latency_open,
3829 	.read = seq_read,
3830 	.llseek = seq_lseek,
3831 	.release = single_release,
3832 	.write = cur_wm_latency_write
3833 };
3834 
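/*
 * i915_wedged: reads report 1 when the GPU is terminally wedged and 0
 * otherwise; a write waits for any pending reset to finish and then
 * declares a hang, with the written value used as the mask of engines to
 * reset. Sketch (path assumes DRM minor 0):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_wedged
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */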
3835 static int
3836 i915_wedged_get(void *data, u64 *val)
3837 {
3838 	int ret = i915_terminally_wedged(data);
3839 
3840 	switch (ret) {
3841 	case -EIO:
3842 		*val = 1;
3843 		return 0;
3844 	case 0:
3845 		*val = 0;
3846 		return 0;
3847 	default:
3848 		return ret;
3849 	}
3850 }
3851 
3852 static int
3853 i915_wedged_set(void *data, u64 val)
3854 {
3855 	struct drm_i915_private *i915 = data;
3856 
3857 	/* Flush any previous reset before applying for a new one */
3858 	wait_event(i915->gpu_error.reset_queue,
3859 		   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
3860 
3861 	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
3862 			  "Manually set wedged engine mask = %llx", val);
3863 	return 0;
3864 }
3865 
3866 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3867 			i915_wedged_get, i915_wedged_set,
3868 			"%llu\n");
3869 
3870 #define DROP_UNBOUND	BIT(0)
3871 #define DROP_BOUND	BIT(1)
3872 #define DROP_RETIRE	BIT(2)
3873 #define DROP_ACTIVE	BIT(3)
3874 #define DROP_FREED	BIT(4)
3875 #define DROP_SHRINK_ALL	BIT(5)
3876 #define DROP_IDLE	BIT(6)
3877 #define DROP_RESET_ACTIVE	BIT(7)
3878 #define DROP_RESET_SEQNO	BIT(8)
3879 #define DROP_ALL (DROP_UNBOUND	| \
3880 		  DROP_BOUND	| \
3881 		  DROP_RETIRE	| \
3882 		  DROP_ACTIVE	| \
3883 		  DROP_FREED	| \
3884 		  DROP_SHRINK_ALL | \
3885 		  DROP_IDLE	| \
3886 		  DROP_RESET_ACTIVE | \
3887 		  DROP_RESET_SEQNO)
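/*
 * i915_gem_drop_caches takes a bitmask of the DROP_* flags above; reading
 * it returns DROP_ALL. Sketch (0x7 selects DROP_UNBOUND | DROP_BOUND |
 * DROP_RETIRE; path assumes DRM minor 0):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *   0x000001ff
 *   # echo 0x7 > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */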
3888 static int
3889 i915_drop_caches_get(void *data, u64 *val)
3890 {
3891 	*val = DROP_ALL;
3892 
3893 	return 0;
3894 }
3895 
3896 static int
3897 i915_drop_caches_set(void *data, u64 val)
3898 {
3899 	struct drm_i915_private *i915 = data;
3900 
3901 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3902 		  val, val & DROP_ALL);
3903 
3904 	if (val & DROP_RESET_ACTIVE &&
3905 	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
3906 		i915_gem_set_wedged(i915);
3907 
3908 	/* No need to check and wait for GPU resets; only libdrm auto-restarts
3909 	 * ioctls that return -EAGAIN. */
3910 	if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
3911 		int ret;
3912 
3913 		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
3914 		if (ret)
3915 			return ret;
3916 
3917 		/*
3918 		 * To finish the flush of the idle_worker, we must complete
3919 		 * the switch-to-kernel-context, which requires a double
3920 		 * pass through wait_for_idle: first queues the switch,
3921 		 * second waits for the switch.
3922 		 */
3923 		if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
3924 			ret = i915_gem_wait_for_idle(i915,
3925 						     I915_WAIT_INTERRUPTIBLE |
3926 						     I915_WAIT_LOCKED,
3927 						     MAX_SCHEDULE_TIMEOUT);
3928 
3929 		if (ret == 0 && val & DROP_IDLE)
3930 			ret = i915_gem_wait_for_idle(i915,
3931 						     I915_WAIT_INTERRUPTIBLE |
3932 						     I915_WAIT_LOCKED,
3933 						     MAX_SCHEDULE_TIMEOUT);
3934 
3935 		if (val & DROP_RETIRE)
3936 			i915_retire_requests(i915);
3937 
3938 		mutex_unlock(&i915->drm.struct_mutex);
3939 	}
3940 
3941 	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
3942 		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
3943 
3944 	fs_reclaim_acquire(GFP_KERNEL);
3945 	if (val & DROP_BOUND)
3946 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
3947 
3948 	if (val & DROP_UNBOUND)
3949 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
3950 
3951 	if (val & DROP_SHRINK_ALL)
3952 		i915_gem_shrink_all(i915);
3953 	fs_reclaim_release(GFP_KERNEL);
3954 
3955 	if (val & DROP_IDLE) {
3956 		flush_delayed_work(&i915->gem.retire_work);
3957 		flush_work(&i915->gem.idle_work);
3958 	}
3959 
3960 	if (val & DROP_FREED)
3961 		i915_gem_drain_freed_objects(i915);
3962 
3963 	return 0;
3964 }
3965 
3966 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3967 			i915_drop_caches_get, i915_drop_caches_set,
3968 			"0x%08llx\n");
3969 
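/*
 * i915_cache_sharing exposes the MBC snoop policy field of
 * GEN6_MBCUNIT_SNPCR on gen6/7 only (-ENODEV elsewhere); writes accept
 * the values 0-3. Sketch (path assumes DRM minor 0):
 *
 *   # echo 3 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */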
3970 static int
3971 i915_cache_sharing_get(void *data, u64 *val)
3972 {
3973 	struct drm_i915_private *dev_priv = data;
3974 	intel_wakeref_t wakeref;
3975 	u32 snpcr = 0;
3976 
3977 	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3978 		return -ENODEV;
3979 
3980 	with_intel_runtime_pm(dev_priv, wakeref)
3981 		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3982 
3983 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3984 
3985 	return 0;
3986 }
3987 
3988 static int
3989 i915_cache_sharing_set(void *data, u64 val)
3990 {
3991 	struct drm_i915_private *dev_priv = data;
3992 	intel_wakeref_t wakeref;
3993 
3994 	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3995 		return -ENODEV;
3996 
3997 	if (val > 3)
3998 		return -EINVAL;
3999 
4000 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4001 	with_intel_runtime_pm(dev_priv, wakeref) {
4002 		u32 snpcr;
4003 
4004 		/* Update the cache sharing policy here as well */
4005 		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4006 		snpcr &= ~GEN6_MBC_SNPCR_MASK;
4007 		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
4008 		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4009 	}
4010 
4011 	return 0;
4012 }
4013 
4014 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4015 			i915_cache_sharing_get, i915_cache_sharing_set,
4016 			"%llu\n");
4017 
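/*
 * The per-platform helpers below reconstruct the live SSEU state from the
 * hardware's power-gating signal/ACK registers: units whose PG-enable bit
 * is set (or whose ACK bit is clear) are powered down and skipped, and the
 * remaining slices/subslices/EUs are accumulated into sseu_dev_info.
 */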
4018 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4019 					  struct sseu_dev_info *sseu)
4020 {
4021 #define SS_MAX 2
4022 	const int ss_max = SS_MAX;
4023 	u32 sig1[SS_MAX], sig2[SS_MAX];
4024 	int ss;
4025 
4026 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4027 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4028 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4029 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4030 
4031 	for (ss = 0; ss < ss_max; ss++) {
4032 		unsigned int eu_cnt;
4033 
4034 		if (sig1[ss] & CHV_SS_PG_ENABLE)
4035 			/* skip disabled subslice */
4036 			continue;
4037 
4038 		sseu->slice_mask = BIT(0);
4039 		sseu->subslice_mask[0] |= BIT(ss);
4040 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4041 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4042 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4043 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4044 		sseu->eu_total += eu_cnt;
4045 		sseu->eu_per_subslice = max_t(unsigned int,
4046 					      sseu->eu_per_subslice, eu_cnt);
4047 	}
4048 #undef SS_MAX
4049 }
4050 
4051 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4052 				     struct sseu_dev_info *sseu)
4053 {
4054 #define SS_MAX 6
4055 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
4056 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4057 	int s, ss;
4058 
4059 	for (s = 0; s < info->sseu.max_slices; s++) {
4060 		/*
4061 		 * FIXME: the valid SS mask follows the spec and reads only
4062 		 * the valid (non-reserved) bits of these registers, although
4063 		 * this seems wrong because it would leave many subslices
4064 		 * without an ACK.
4065 		 */
4066 		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4067 			GEN10_PGCTL_VALID_SS_MASK(s);
4068 		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4069 		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4070 	}
4071 
4072 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4073 		     GEN9_PGCTL_SSA_EU19_ACK |
4074 		     GEN9_PGCTL_SSA_EU210_ACK |
4075 		     GEN9_PGCTL_SSA_EU311_ACK;
4076 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4077 		     GEN9_PGCTL_SSB_EU19_ACK |
4078 		     GEN9_PGCTL_SSB_EU210_ACK |
4079 		     GEN9_PGCTL_SSB_EU311_ACK;
4080 
4081 	for (s = 0; s < info->sseu.max_slices; s++) {
4082 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4083 			/* skip disabled slice */
4084 			continue;
4085 
4086 		sseu->slice_mask |= BIT(s);
4087 		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
4088 
4089 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4090 			unsigned int eu_cnt;
4091 
4092 			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4093 				/* skip disabled subslice */
4094 				continue;
4095 
4096 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4097 					       eu_mask[ss % 2]);
4098 			sseu->eu_total += eu_cnt;
4099 			sseu->eu_per_subslice = max_t(unsigned int,
4100 						      sseu->eu_per_subslice,
4101 						      eu_cnt);
4102 		}
4103 	}
4104 #undef SS_MAX
4105 }
4106 
4107 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4108 				    struct sseu_dev_info *sseu)
4109 {
4110 #define SS_MAX 3
4111 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
4112 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4113 	int s, ss;
4114 
4115 	for (s = 0; s < info->sseu.max_slices; s++) {
4116 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4117 		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4118 		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4119 	}
4120 
4121 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4122 		     GEN9_PGCTL_SSA_EU19_ACK |
4123 		     GEN9_PGCTL_SSA_EU210_ACK |
4124 		     GEN9_PGCTL_SSA_EU311_ACK;
4125 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4126 		     GEN9_PGCTL_SSB_EU19_ACK |
4127 		     GEN9_PGCTL_SSB_EU210_ACK |
4128 		     GEN9_PGCTL_SSB_EU311_ACK;
4129 
4130 	for (s = 0; s < info->sseu.max_slices; s++) {
4131 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4132 			/* skip disabled slice */
4133 			continue;
4134 
4135 		sseu->slice_mask |= BIT(s);
4136 
4137 		if (IS_GEN9_BC(dev_priv))
4138 			sseu->subslice_mask[s] =
4139 				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
4140 
4141 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4142 			unsigned int eu_cnt;
4143 
4144 			if (IS_GEN9_LP(dev_priv)) {
4145 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4146 					/* skip disabled subslice */
4147 					continue;
4148 
4149 				sseu->subslice_mask[s] |= BIT(ss);
4150 			}
4151 
4152 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4153 					       eu_mask[ss % 2]);
4154 			sseu->eu_total += eu_cnt;
4155 			sseu->eu_per_subslice = max_t(unsigned int,
4156 						      sseu->eu_per_subslice,
4157 						      eu_cnt);
4158 		}
4159 	}
4160 #undef SS_MAX
4161 }
4162 
4163 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4164 					 struct sseu_dev_info *sseu)
4165 {
4166 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4167 	int s;
4168 
4169 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4170 
4171 	if (sseu->slice_mask) {
4172 		sseu->eu_per_subslice =
4173 			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
4174 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4175 			sseu->subslice_mask[s] =
4176 				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
4177 		}
4178 		sseu->eu_total = sseu->eu_per_subslice *
4179 				 sseu_subslice_total(sseu);
4180 
4181 		/* subtract fused off EU(s) from enabled slice(s) */
4182 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4183 			u8 subslice_7eu =
4184 				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
4185 
4186 			sseu->eu_total -= hweight8(subslice_7eu);
4187 		}
4188 	}
4189 }
4190 
4191 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4192 				 const struct sseu_dev_info *sseu)
4193 {
4194 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4195 	const char *type = is_available_info ? "Available" : "Enabled";
4196 	int s;
4197 
4198 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
4199 		   sseu->slice_mask);
4200 	seq_printf(m, "  %s Slice Total: %u\n", type,
4201 		   hweight8(sseu->slice_mask));
4202 	seq_printf(m, "  %s Subslice Total: %u\n", type,
4203 		   sseu_subslice_total(sseu));
4204 	for (s = 0; s < fls(sseu->slice_mask); s++) {
4205 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4206 			   s, hweight8(sseu->subslice_mask[s]));
4207 	}
4208 	seq_printf(m, "  %s EU Total: %u\n", type,
4209 		   sseu->eu_total);
4210 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4211 		   sseu->eu_per_subslice);
4212 
4213 	if (!is_available_info)
4214 		return;
4215 
4216 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4217 	if (HAS_POOLED_EU(dev_priv))
4218 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4219 
4220 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4221 		   yesno(sseu->has_slice_pg));
4222 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4223 		   yesno(sseu->has_subslice_pg));
4224 	seq_printf(m, "  Has EU Power Gating: %s\n",
4225 		   yesno(sseu->has_eu_pg));
4226 }
4227 
4228 static int i915_sseu_status(struct seq_file *m, void *unused)
4229 {
4230 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4231 	struct sseu_dev_info sseu;
4232 	intel_wakeref_t wakeref;
4233 
4234 	if (INTEL_GEN(dev_priv) < 8)
4235 		return -ENODEV;
4236 
4237 	seq_puts(m, "SSEU Device Info\n");
4238 	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
4239 
4240 	seq_puts(m, "SSEU Device Status\n");
4241 	memset(&sseu, 0, sizeof(sseu));
4242 	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
4243 	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
4244 	sseu.max_eus_per_subslice =
4245 		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
4246 
4247 	with_intel_runtime_pm(dev_priv, wakeref) {
4248 		if (IS_CHERRYVIEW(dev_priv))
4249 			cherryview_sseu_device_status(dev_priv, &sseu);
4250 		else if (IS_BROADWELL(dev_priv))
4251 			broadwell_sseu_device_status(dev_priv, &sseu);
4252 		else if (IS_GEN(dev_priv, 9))
4253 			gen9_sseu_device_status(dev_priv, &sseu);
4254 		else if (INTEL_GEN(dev_priv) >= 10)
4255 			gen10_sseu_device_status(dev_priv, &sseu);
4256 	}
4257 
4258 	i915_print_sseu_info(m, false, &sseu);
4259 
4260 	return 0;
4261 }
4262 
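/*
 * i915_forcewake_user pins forcewake and a runtime-pm wakeref for as long
 * as the file is held open, keeping the GT awake (e.g. for register
 * inspection). A shell sketch, path assuming DRM minor 0:
 *
 *   # exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user   # grab
 *   # ... inspect registers ...
 *   # exec 3<&-                                             # release
 */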
4263 static int i915_forcewake_open(struct inode *inode, struct file *file)
4264 {
4265 	struct drm_i915_private *i915 = inode->i_private;
4266 
4267 	if (INTEL_GEN(i915) < 6)
4268 		return 0;
4269 
4270 	file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
4271 	intel_uncore_forcewake_user_get(&i915->uncore);
4272 
4273 	return 0;
4274 }
4275 
4276 static int i915_forcewake_release(struct inode *inode, struct file *file)
4277 {
4278 	struct drm_i915_private *i915 = inode->i_private;
4279 
4280 	if (INTEL_GEN(i915) < 6)
4281 		return 0;
4282 
4283 	intel_uncore_forcewake_user_put(&i915->uncore);
4284 	intel_runtime_pm_put(i915,
4285 			     (intel_wakeref_t)(uintptr_t)file->private_data);
4286 
4287 	return 0;
4288 }
4289 
4290 static const struct file_operations i915_forcewake_fops = {
4291 	.owner = THIS_MODULE,
4292 	.open = i915_forcewake_open,
4293 	.release = i915_forcewake_release,
4294 };
4295 
4296 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4297 {
4298 	struct drm_i915_private *dev_priv = m->private;
4299 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4300 
4301 	/* Synchronize with everything first in case there's been an HPD
4302 	 * storm, but we haven't finished handling it in the kernel yet
4303 	 */
4304 	synchronize_irq(dev_priv->drm.irq);
4305 	flush_work(&dev_priv->hotplug.dig_port_work);
4306 	flush_work(&dev_priv->hotplug.hotplug_work);
4307 
4308 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4309 	seq_printf(m, "Detected: %s\n",
4310 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4311 
4312 	return 0;
4313 }
4314 
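/*
 * i915_hpd_storm_ctl accepts either a decimal threshold (0 disables HPD
 * storm detection) or the literal string "reset" to restore
 * HPD_STORM_DEFAULT_THRESHOLD. Sketch, path assuming DRM minor 0:
 *
 *   # echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   # echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */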
4315 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4316 					const char __user *ubuf, size_t len,
4317 					loff_t *offp)
4318 {
4319 	struct seq_file *m = file->private_data;
4320 	struct drm_i915_private *dev_priv = m->private;
4321 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4322 	unsigned int new_threshold;
4323 	int i;
4324 	char *newline;
4325 	char tmp[16];
4326 
4327 	if (len >= sizeof(tmp))
4328 		return -EINVAL;
4329 
4330 	if (copy_from_user(tmp, ubuf, len))
4331 		return -EFAULT;
4332 
4333 	tmp[len] = '\0';
4334 
4335 	/* Strip newline, if any */
4336 	newline = strchr(tmp, '\n');
4337 	if (newline)
4338 		*newline = '\0';
4339 
4340 	if (strcmp(tmp, "reset") == 0)
4341 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4342 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4343 		return -EINVAL;
4344 
4345 	if (new_threshold > 0)
4346 		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4347 			      new_threshold);
4348 	else
4349 		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4350 
4351 	spin_lock_irq(&dev_priv->irq_lock);
4352 	hotplug->hpd_storm_threshold = new_threshold;
4353 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4354 	for_each_hpd_pin(i)
4355 		hotplug->stats[i].count = 0;
4356 	spin_unlock_irq(&dev_priv->irq_lock);
4357 
4358 	/* Re-enable hpd immediately if we were in an irq storm */
4359 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4360 
4361 	return len;
4362 }
4363 
4364 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4365 {
4366 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4367 }
4368 
4369 static const struct file_operations i915_hpd_storm_ctl_fops = {
4370 	.owner = THIS_MODULE,
4371 	.open = i915_hpd_storm_ctl_open,
4372 	.read = seq_read,
4373 	.llseek = seq_lseek,
4374 	.release = single_release,
4375 	.write = i915_hpd_storm_ctl_write
4376 };
4377 
4378 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4379 {
4380 	struct drm_i915_private *dev_priv = m->private;
4381 
4382 	seq_printf(m, "Enabled: %s\n",
4383 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4384 
4385 	return 0;
4386 }
4387 
4388 static int
4389 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4390 {
4391 	return single_open(file, i915_hpd_short_storm_ctl_show,
4392 			   inode->i_private);
4393 }
4394 
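/*
 * i915_hpd_short_storm_ctl accepts any kstrtobool-compatible value ("0",
 * "1", "y", "n", ...) or "reset", which restores the per-system default of
 * enabled unless the platform has DP-MST. Sketch:
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 */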
4395 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4396 					      const char __user *ubuf,
4397 					      size_t len, loff_t *offp)
4398 {
4399 	struct seq_file *m = file->private_data;
4400 	struct drm_i915_private *dev_priv = m->private;
4401 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4402 	char *newline;
4403 	char tmp[16];
4404 	int i;
4405 	bool new_state;
4406 
4407 	if (len >= sizeof(tmp))
4408 		return -EINVAL;
4409 
4410 	if (copy_from_user(tmp, ubuf, len))
4411 		return -EFAULT;
4412 
4413 	tmp[len] = '\0';
4414 
4415 	/* Strip newline, if any */
4416 	newline = strchr(tmp, '\n');
4417 	if (newline)
4418 		*newline = '\0';
4419 
4420 	/* Reset to the "default" state for this system */
4421 	if (strcmp(tmp, "reset") == 0)
4422 		new_state = !HAS_DP_MST(dev_priv);
4423 	else if (kstrtobool(tmp, &new_state) != 0)
4424 		return -EINVAL;
4425 
4426 	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4427 		      new_state ? "En" : "Dis");
4428 
4429 	spin_lock_irq(&dev_priv->irq_lock);
4430 	hotplug->hpd_short_storm_enabled = new_state;
4431 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4432 	for_each_hpd_pin(i)
4433 		hotplug->stats[i].count = 0;
4434 	spin_unlock_irq(&dev_priv->irq_lock);
4435 
4436 	/* Re-enable hpd immediately if we were in an irq storm */
4437 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4438 
4439 	return len;
4440 }
4441 
4442 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4443 	.owner = THIS_MODULE,
4444 	.open = i915_hpd_short_storm_ctl_open,
4445 	.read = seq_read,
4446 	.llseek = seq_lseek,
4447 	.release = single_release,
4448 	.write = i915_hpd_short_storm_ctl_write,
4449 };
4450 
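/*
 * Writing a non-zero value to i915_drrs_ctl manually enables DRRS on every
 * active, DRRS-capable eDP pipe; writing zero disables it again. Sketch,
 * path assuming DRM minor 0:
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */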
4451 static int i915_drrs_ctl_set(void *data, u64 val)
4452 {
4453 	struct drm_i915_private *dev_priv = data;
4454 	struct drm_device *dev = &dev_priv->drm;
4455 	struct intel_crtc *crtc;
4456 
4457 	if (INTEL_GEN(dev_priv) < 7)
4458 		return -ENODEV;
4459 
4460 	for_each_intel_crtc(dev, crtc) {
4461 		struct drm_connector_list_iter conn_iter;
4462 		struct intel_crtc_state *crtc_state;
4463 		struct drm_connector *connector;
4464 		struct drm_crtc_commit *commit;
4465 		int ret;
4466 
4467 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4468 		if (ret)
4469 			return ret;
4470 
4471 		crtc_state = to_intel_crtc_state(crtc->base.state);
4472 
4473 		if (!crtc_state->base.active ||
4474 		    !crtc_state->has_drrs)
4475 			goto out;
4476 
4477 		commit = crtc_state->base.commit;
4478 		if (commit) {
4479 			ret = wait_for_completion_interruptible(&commit->hw_done);
4480 			if (ret)
4481 				goto out;
4482 		}
4483 
4484 		drm_connector_list_iter_begin(dev, &conn_iter);
4485 		drm_for_each_connector_iter(connector, &conn_iter) {
4486 			struct intel_encoder *encoder;
4487 			struct intel_dp *intel_dp;
4488 
4489 			if (!(crtc_state->base.connector_mask &
4490 			      drm_connector_mask(connector)))
4491 				continue;
4492 
4493 			encoder = intel_attached_encoder(connector);
4494 			if (encoder->type != INTEL_OUTPUT_EDP)
4495 				continue;
4496 
4497 			DRM_DEBUG_DRIVER("Manually %sabling DRRS (%llu)\n",
4498 					 val ? "en" : "dis", val);
4499 
4500 			intel_dp = enc_to_intel_dp(&encoder->base);
4501 			if (val)
4502 				intel_edp_drrs_enable(intel_dp,
4503 						      crtc_state);
4504 			else
4505 				intel_edp_drrs_disable(intel_dp,
4506 						       crtc_state);
4507 		}
4508 		drm_connector_list_iter_end(&conn_iter);
4509 
4510 out:
4511 		drm_modeset_unlock(&crtc->base.mutex);
4512 		if (ret)
4513 			return ret;
4514 	}
4515 
4516 	return 0;
4517 }
4518 
4519 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4520 
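/*
 * FIFO underrun reporting is disarmed after the first underrun on a pipe
 * to avoid log spam; writing a truthy value here re-arms it on all active
 * pipes (and for FBC). Sketch:
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */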
4521 static ssize_t
4522 i915_fifo_underrun_reset_write(struct file *filp,
4523 			       const char __user *ubuf,
4524 			       size_t cnt, loff_t *ppos)
4525 {
4526 	struct drm_i915_private *dev_priv = filp->private_data;
4527 	struct intel_crtc *intel_crtc;
4528 	struct drm_device *dev = &dev_priv->drm;
4529 	int ret;
4530 	bool reset;
4531 
4532 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
4533 	if (ret)
4534 		return ret;
4535 
4536 	if (!reset)
4537 		return cnt;
4538 
4539 	for_each_intel_crtc(dev, intel_crtc) {
4540 		struct drm_crtc_commit *commit;
4541 		struct intel_crtc_state *crtc_state;
4542 
4543 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4544 		if (ret)
4545 			return ret;
4546 
4547 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4548 		commit = crtc_state->base.commit;
4549 		if (commit) {
4550 			ret = wait_for_completion_interruptible(&commit->hw_done);
4551 			if (!ret)
4552 				ret = wait_for_completion_interruptible(&commit->flip_done);
4553 		}
4554 
4555 		if (!ret && crtc_state->base.active) {
4556 			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4557 				      pipe_name(intel_crtc->pipe));
4558 
4559 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4560 		}
4561 
4562 		drm_modeset_unlock(&intel_crtc->base.mutex);
4563 
4564 		if (ret)
4565 			return ret;
4566 	}
4567 
4568 	ret = intel_fbc_reset_underrun(dev_priv);
4569 	if (ret)
4570 		return ret;
4571 
4572 	return cnt;
4573 }
4574 
4575 static const struct file_operations i915_fifo_underrun_reset_ops = {
4576 	.owner = THIS_MODULE,
4577 	.open = simple_open,
4578 	.write = i915_fifo_underrun_reset_write,
4579 	.llseek = default_llseek,
4580 };
4581 
4582 static const struct drm_info_list i915_debugfs_list[] = {
4583 	{"i915_capabilities", i915_capabilities, 0},
4584 	{"i915_gem_objects", i915_gem_object_info, 0},
4585 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4586 	{"i915_gem_stolen", i915_gem_stolen_list_info, 0},
4587 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4588 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4589 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4590 	{"i915_guc_info", i915_guc_info, 0},
4591 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4592 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4593 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4594 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4595 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4596 	{"i915_frequency_info", i915_frequency_info, 0},
4597 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4598 	{"i915_reset_info", i915_reset_info, 0},
4599 	{"i915_drpc_info", i915_drpc_info, 0},
4600 	{"i915_emon_status", i915_emon_status, 0},
4601 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4602 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4603 	{"i915_fbc_status", i915_fbc_status, 0},
4604 	{"i915_ips_status", i915_ips_status, 0},
4605 	{"i915_sr_status", i915_sr_status, 0},
4606 	{"i915_opregion", i915_opregion, 0},
4607 	{"i915_vbt", i915_vbt, 0},
4608 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4609 	{"i915_context_status", i915_context_status, 0},
4610 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4611 	{"i915_swizzle_info", i915_swizzle_info, 0},
4612 	{"i915_llc", i915_llc, 0},
4613 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4614 	{"i915_energy_uJ", i915_energy_uJ, 0},
4615 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4616 	{"i915_power_domain_info", i915_power_domain_info, 0},
4617 	{"i915_dmc_info", i915_dmc_info, 0},
4618 	{"i915_display_info", i915_display_info, 0},
4619 	{"i915_engine_info", i915_engine_info, 0},
4620 	{"i915_rcs_topology", i915_rcs_topology, 0},
4621 	{"i915_shrinker_info", i915_shrinker_info, 0},
4622 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4623 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4624 	{"i915_wa_registers", i915_wa_registers, 0},
4625 	{"i915_ddb_info", i915_ddb_info, 0},
4626 	{"i915_sseu_status", i915_sseu_status, 0},
4627 	{"i915_drrs_status", i915_drrs_status, 0},
4628 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4629 };
4630 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4631 
4632 static const struct i915_debugfs_files {
4633 	const char *name;
4634 	const struct file_operations *fops;
4635 } i915_debugfs_files[] = {
4636 	{"i915_wedged", &i915_wedged_fops},
4637 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4638 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4639 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4640 	{"i915_error_state", &i915_error_state_fops},
4641 	{"i915_gpu_info", &i915_gpu_info_fops},
4642 #endif
4643 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4644 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4645 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4646 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4647 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4648 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4649 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4650 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4651 	{"i915_guc_log_level", &i915_guc_log_level_fops},
4652 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
4653 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4654 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4655 	{"i915_ipc_status", &i915_ipc_status_fops},
4656 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
4657 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4658 };
4659 
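/*
 * All of the entries above are created under the primary minor's debugfs
 * directory, i.e. <debugfs>/dri/<minor> (typically /sys/kernel/debug/dri/0
 * on a single-GPU system).
 */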
4660 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4661 {
4662 	struct drm_minor *minor = dev_priv->drm.primary;
4663 	struct dentry *ent;
4664 	int i;
4665 
4666 	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4667 				  minor->debugfs_root, to_i915(minor->dev),
4668 				  &i915_forcewake_fops);
4669 	if (!ent)
4670 		return -ENOMEM;
4671 
4672 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4673 		ent = debugfs_create_file(i915_debugfs_files[i].name,
4674 					  S_IRUGO | S_IWUSR,
4675 					  minor->debugfs_root,
4676 					  to_i915(minor->dev),
4677 					  i915_debugfs_files[i].fops);
4678 		if (!ent)
4679 			return -ENOMEM;
4680 	}
4681 
4682 	return drm_debugfs_create_files(i915_debugfs_list,
4683 					I915_DEBUGFS_ENTRIES,
4684 					minor->debugfs_root, minor);
4685 }
4686 
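/*
 * The remaining files are registered per connector (see
 * i915_debugfs_connector_add() below) and live in that connector's debugfs
 * directory. An illustrative read of the DPCD dump, assuming an "eDP-1"
 * connector on DRM minor 0 (the bytes shown are made up):
 *
 *   # cat /sys/kernel/debug/dri/0/eDP-1/i915_dpcd
 *   0000: 12 14 c4 ...
 */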
4687 struct dpcd_block {
4688 	/* DPCD dump start address. */
4689 	unsigned int offset;
4690 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4691 	unsigned int end;
4692 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4693 	size_t size;
4694 	/* Only valid for eDP. */
4695 	bool edp;
4696 };
4697 
4698 static const struct dpcd_block i915_dpcd_debug[] = {
4699 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4700 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4701 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4702 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4703 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4704 	{ .offset = DP_SET_POWER },
4705 	{ .offset = DP_EDP_DPCD_REV },
4706 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4707 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4708 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4709 };
4710 
4711 static int i915_dpcd_show(struct seq_file *m, void *data)
4712 {
4713 	struct drm_connector *connector = m->private;
4714 	struct intel_dp *intel_dp =
4715 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4716 	u8 buf[16];
4717 	ssize_t err;
4718 	int i;
4719 
4720 	if (connector->status != connector_status_connected)
4721 		return -ENODEV;
4722 
4723 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4724 		const struct dpcd_block *b = &i915_dpcd_debug[i];
4725 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4726 
4727 		if (b->edp &&
4728 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4729 			continue;
4730 
4731 		/* low tech for now */
4732 		if (WARN_ON(size > sizeof(buf)))
4733 			continue;
4734 
4735 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4736 		if (err < 0)
4737 			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4738 		else
4739 			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4740 	}
4741 
4742 	return 0;
4743 }
4744 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4745 
4746 static int i915_panel_show(struct seq_file *m, void *data)
4747 {
4748 	struct drm_connector *connector = m->private;
4749 	struct intel_dp *intel_dp =
4750 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4751 
4752 	if (connector->status != connector_status_connected)
4753 		return -ENODEV;
4754 
4755 	seq_printf(m, "Panel power up delay: %d\n",
4756 		   intel_dp->panel_power_up_delay);
4757 	seq_printf(m, "Panel power down delay: %d\n",
4758 		   intel_dp->panel_power_down_delay);
4759 	seq_printf(m, "Backlight on delay: %d\n",
4760 		   intel_dp->backlight_on_delay);
4761 	seq_printf(m, "Backlight off delay: %d\n",
4762 		   intel_dp->backlight_off_delay);
4763 
4764 	return 0;
4765 }
4766 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4767 
4768 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4769 {
4770 	struct drm_connector *connector = m->private;
4771 	struct intel_connector *intel_connector = to_intel_connector(connector);
4772 	bool hdcp_cap, hdcp2_cap;
4773 
4774 	if (connector->status != connector_status_connected)
4775 		return -ENODEV;
4776 
4777 	/* HDCP is supported by connector */
4778 	if (!intel_connector->hdcp.shim)
4779 		return -EINVAL;
4780 
4781 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
4782 		   connector->base.id);
4783 	hdcp_cap = intel_hdcp_capable(intel_connector);
4784 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
4785 
4786 	if (hdcp_cap)
4787 		seq_puts(m, "HDCP1.4 ");
4788 	if (hdcp2_cap)
4789 		seq_puts(m, "HDCP2.2 ");
4790 
4791 	if (!hdcp_cap && !hdcp2_cap)
4792 		seq_puts(m, "None");
4793 	seq_puts(m, "\n");
4794 
4795 	return 0;
4796 }
4797 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4798 
4799 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
4800 {
4801 	struct drm_connector *connector = m->private;
4802 	struct drm_device *dev = connector->dev;
4803 	struct drm_crtc *crtc;
4804 	struct intel_dp *intel_dp;
4805 	struct drm_modeset_acquire_ctx ctx;
4806 	struct intel_crtc_state *crtc_state = NULL;
4807 	int ret = 0;
4808 	bool try_again = false;
4809 
4810 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
4811 
4812 	do {
4813 		try_again = false;
4814 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4815 				       &ctx);
4816 		if (ret) {
4817 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
4818 				try_again = true;
4819 				continue;
4820 			}
4821 			break;
4822 		}
4823 		crtc = connector->state->crtc;
4824 		if (connector->status != connector_status_connected || !crtc) {
4825 			ret = -ENODEV;
4826 			break;
4827 		}
4828 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
4829 		if (ret == -EDEADLK) {
4830 			ret = drm_modeset_backoff(&ctx);
4831 			if (!ret) {
4832 				try_again = true;
4833 				continue;
4834 			}
4835 			break;
4836 		} else if (ret) {
4837 			break;
4838 		}
4839 		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4840 		crtc_state = to_intel_crtc_state(crtc->state);
4841 		seq_printf(m, "DSC_Enabled: %s\n",
4842 			   yesno(crtc_state->dsc_params.compression_enable));
4843 		seq_printf(m, "DSC_Sink_Support: %s\n",
4844 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
4845 		seq_printf(m, "Force_DSC_Enable: %s\n",
4846 			   yesno(intel_dp->force_dsc_en));
4847 		if (!intel_dp_is_edp(intel_dp))
4848 			seq_printf(m, "FEC_Sink_Support: %s\n",
4849 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
4850 	} while (try_again);
4851 
4852 	drm_modeset_drop_locks(&ctx);
4853 	drm_modeset_acquire_fini(&ctx);
4854 
4855 	return ret;
4856 }
4857 
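/*
 * Writing a boolean to i915_dsc_fec_support sets intel_dp->force_dsc_en,
 * which takes effect on the next modeset of that connector; reading back
 * reports the current DSC/FEC state. Sketch, connector directory name as
 * assumed above:
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */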
4858 static ssize_t i915_dsc_fec_support_write(struct file *file,
4859 					  const char __user *ubuf,
4860 					  size_t len, loff_t *offp)
4861 {
4862 	bool dsc_enable = false;
4863 	int ret;
4864 	struct drm_connector *connector =
4865 		((struct seq_file *)file->private_data)->private;
4866 	struct intel_encoder *encoder = intel_attached_encoder(connector);
4867 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4868 
4869 	if (len == 0)
4870 		return 0;
4871 
4872 	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4873 			 len);
4874 
4875 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4876 	if (ret < 0)
4877 		return ret;
4878 
4879 	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4880 			 (dsc_enable) ? "true" : "false");
4881 	intel_dp->force_dsc_en = dsc_enable;
4882 
4883 	*offp += len;
4884 	return len;
4885 }
4886 
4887 static int i915_dsc_fec_support_open(struct inode *inode,
4888 				     struct file *file)
4889 {
4890 	return single_open(file, i915_dsc_fec_support_show,
4891 			   inode->i_private);
4892 }
4893 
4894 static const struct file_operations i915_dsc_fec_support_fops = {
4895 	.owner = THIS_MODULE,
4896 	.open = i915_dsc_fec_support_open,
4897 	.read = seq_read,
4898 	.llseek = seq_lseek,
4899 	.release = single_release,
4900 	.write = i915_dsc_fec_support_write
4901 };
4902 
4903 /**
4904  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4905  * @connector: pointer to a registered drm_connector
4906  *
4907  * Cleanup will be done by drm_connector_unregister() through a call to
4908  * drm_debugfs_connector_remove().
4909  *
4910  * Returns 0 on success, negative error codes on error.
4911  */
4912 int i915_debugfs_connector_add(struct drm_connector *connector)
4913 {
4914 	struct dentry *root = connector->debugfs_entry;
4915 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
4916 
4917 	/* The connector must have been registered beforehand. */
4918 	if (!root)
4919 		return -ENODEV;
4920 
4921 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4922 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4923 		debugfs_create_file("i915_dpcd", S_IRUGO, root,
4924 				    connector, &i915_dpcd_fops);
4925 
4926 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4927 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4928 				    connector, &i915_panel_fops);
4929 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4930 				    connector, &i915_psr_sink_status_fops);
4931 	}
4932 
4933 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4934 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4935 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4936 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4937 				    connector, &i915_hdcp_sink_capability_fops);
4938 	}
4939 
4940 	if (INTEL_GEN(dev_priv) >= 10 &&
4941 	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4942 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4943 		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4944 				    connector, &i915_dsc_fec_support_fops);
4945 
4946 	return 0;
4947 }
4948