/* drivers/gpu/drm/i915/i915_debugfs.c (revision 0add53713b1c07a1c71e27a20e21eb7c180b4e7b) */
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

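/*
 * Print one i915 module parameter as "i915.<name>=<value>". The stringified
 * type is compared with __builtin_strcmp(), which the compiler folds at
 * compile time, so only the matching branch survives and BUILD_BUG() trips
 * for any parameter type this helper does not know how to format.
 */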
static __always_inline void seq_print_param(struct seq_file *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}

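/*
 * Dump the device generation, platform, PCH type, every device info flag
 * and the current module parameters. The parameters are printed under
 * kernel_param_lock() so a concurrent sysfs write cannot tear the values.
 *
 * Userspace reads this through debugfs, e.g. (assuming debugfs is mounted
 * at the usual location and the device is DRM minor 0):
 *
 *	cat /sys/kernel/debug/dri/0/i915_capabilities
 */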
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
	I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

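/*
 * Single-character status flags used by describe_obj() below:
 *   '*'      object is active on the GPU
 *   'p'      object is pinned for display (scanout)
 *   'X'/'Y'  tiling mode (blank for linear)
 *   'g'      object sits on the GGTT userfault list
 *   'M'      object has a kernel mapping via obj->mm.mapping
 */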
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return !list_empty(&obj->userfault_link) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

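/*
 * Emit a one-line description of a GEM object: status flags, size, cache
 * domains, GEM name, pin count, every bound VMA (with GGTT view details),
 * fence register, stolen offset, last engine to write it and frontbuffer
 * bits. Callers must hold struct_mutex so the VMA list is stable.
 */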
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_puts(m, " (display)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, ", fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

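/*
 * List every object backed by stolen memory, sorted by offset within the
 * stolen region. The snapshot array is sized and allocated before taking
 * struct_mutex so we never allocate under the lock; the bound and unbound
 * lists are then walked at most 'total' entries deep in case new objects
 * appeared in the meantime.
 */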
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

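/*
 * idr_for_each() callback: accumulate object statistics for one client.
 * GGTT bindings always count towards the client; ppGTT bindings are only
 * counted when the VMA lives in this client's own address space, so
 * objects shared across ppGTTs are not double-billed.
 */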
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}

static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;

		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}

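/*
 * Top-level GEM memory report: global object/byte totals, breakdowns for
 * unbound, bound, purgeable, mapped and display-pinned objects, GGTT size,
 * kernel batch-pool and context usage, and finally one stats line per open
 * DRM client, labelled with the most recently active task's name.
 */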
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count;
	u64 size, mapped_size, purgeable_size, dpy_size;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->pin_display) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	bool show_pin_display_only = !!node->info_ent->data;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (show_pin_display_only && !obj->pin_display)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

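/*
 * One line per request: global seqno, context hw_id and per-timeline fence
 * seqno, scheduler priority, age since emission in milliseconds, and the
 * name of the timeline it executes on.
 */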
static void print_request(struct seq_file *m,
			  struct drm_i915_gem_request *rq,
			  const char *prefix)
{
	seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_request *req;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv, id) {
		int count;

		count = 0;
		list_for_each_entry(req, &engine->timeline->requests, link)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->timeline->requests, link)
			print_request(m, req, "    ");

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		i915_ring_seqno_info(m, engine);

	return 0;
}

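/*
 * Dump the interrupt mask/identity/enable registers. The register layout
 * differs per platform (Cherryview, gen8+, Valleyview, pre-PCH-split and
 * Ironlake-style PCH split are each handled separately), so each branch
 * reads its own set. Pipe registers are only touched after grabbing the
 * pipe's power domain, since reading a powered-down pipe would fault.
 */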
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv, id) {
		if (INTEL_GEN(dev_priv) >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

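/*
 * GPU error state plumbing, compiled in only with
 * CONFIG_DRM_I915_CAPTURE_ERROR. Opening i915_gpu_info captures a fresh
 * snapshot of the current GPU state, while i915_error_state exposes the
 * last recorded hang; writing anything to the latter discards it.
 */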
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;

	intel_runtime_pm_get(i915);
	gpu = i915_capture_gpu_state(i915);
	intel_runtime_pm_put(i915);
	if (!gpu)
		return -ENOMEM;

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_global_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");

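/*
 * Report GPU frequency (RPS) state. Ironlake (gen5) decodes
 * MEMSWCTL/MEMSTAT, Valleyview/Cherryview query the punit, and gen6+
 * decodes the RP registers: the requested, current (CAGF) and
 * min/max/idle/boost frequencies plus the up/down evaluation-interval
 * counters used by the hardware turbo algorithm. RPSTAT1 lives in the GT
 * power well, so the gen6+ path holds forcewake around the raw reads.
 */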
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		if (INTEL_GEN(dev_priv) >= 9)
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   dev_priv->rps.pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

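/*
 * Summarise hangcheck: reset/wedged flags, whether the hangcheck timer is
 * armed, and per-engine detail (hangcheck vs current vs last-submitted
 * seqno, outstanding waiters, ACTHD, the last hangcheck verdict and, for
 * the render engine, a fresh INSTDONE read next to the accumulated one).
 */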
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   engine->timeline->inflight_seqnos);
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}

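/*
 * Render-standby (RC state) reporting for Ironlake: decode MEMMODECTL,
 * RSTDBYCTL and CRSTANDVID into boost/P-state limits, RS VIDs and the
 * current render-standby state.
 */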
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rpmodectl1, rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

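/*
 * RC6 state for gen6+. GEN6_GT_CORE_STATUS is deliberately read without
 * taking forcewake (I915_READ_FW), since waking the GT would pull it out
 * of the very RC state we are trying to observe; instead we spin briefly
 * until FORCEWAKE_ACK shows nobody else holds it, and flag the report as
 * inaccurate when a forcewake reference is outstanding.
 */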
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
	unsigned forcewake_count;
	int count = 0;

	forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int err;

	intel_runtime_pm_get(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = vlv_drpc_info(m);
	else if (INTEL_GEN(dev_priv) >= 6)
		err = gen6_drpc_info(m);
	else
		err = ironlake_drpc_info(m);

	intel_runtime_pm_put(dev_priv);

	return err;
}

1598 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1599 {
1600 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1601 
1602 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1603 		   dev_priv->fb_tracking.busy_bits);
1604 
1605 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1606 		   dev_priv->fb_tracking.flip_bits);
1607 
1608 	return 0;
1609 }
1610 
1611 static int i915_fbc_status(struct seq_file *m, void *unused)
1612 {
1613 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1614 
1615 	if (!HAS_FBC(dev_priv)) {
1616 		seq_puts(m, "FBC unsupported on this chipset\n");
1617 		return 0;
1618 	}
1619 
1620 	intel_runtime_pm_get(dev_priv);
1621 	mutex_lock(&dev_priv->fbc.lock);
1622 
1623 	if (intel_fbc_is_active(dev_priv))
1624 		seq_puts(m, "FBC enabled\n");
1625 	else
1626 		seq_printf(m, "FBC disabled: %s\n",
1627 			   dev_priv->fbc.no_fbc_reason);
1628 
1629 	if (intel_fbc_is_active(dev_priv)) {
1630 		u32 mask;
1631 
1632 		if (INTEL_GEN(dev_priv) >= 8)
1633 			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1634 		else if (INTEL_GEN(dev_priv) >= 7)
1635 			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1636 		else if (INTEL_GEN(dev_priv) >= 5)
1637 			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1638 		else if (IS_G4X(dev_priv))
1639 			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1640 		else
1641 			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1642 							FBC_STAT_COMPRESSED);
1643 
1644 		seq_printf(m, "Compressing: %s\n", yesno(mask));
1645 	}
1646 
1647 	mutex_unlock(&dev_priv->fbc.lock);
1648 	intel_runtime_pm_put(dev_priv);
1649 
1650 	return 0;
1651 }
1652 
1653 static int i915_fbc_false_color_get(void *data, u64 *val)
1654 {
1655 	struct drm_i915_private *dev_priv = data;
1656 
1657 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1658 		return -ENODEV;
1659 
1660 	*val = dev_priv->fbc.false_color;
1661 
1662 	return 0;
1663 }
1664 
1665 static int i915_fbc_false_color_set(void *data, u64 val)
1666 {
1667 	struct drm_i915_private *dev_priv = data;
1668 	u32 reg;
1669 
1670 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1671 		return -ENODEV;
1672 
1673 	mutex_lock(&dev_priv->fbc.lock);
1674 
1675 	reg = I915_READ(ILK_DPFC_CONTROL);
1676 	dev_priv->fbc.false_color = val;
1677 
1678 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1679 		   (reg | FBC_CTL_FALSE_COLOR) :
1680 		   (reg & ~FBC_CTL_FALSE_COLOR));
1681 
1682 	mutex_unlock(&dev_priv->fbc.lock);
1683 	return 0;
1684 }
1685 
1686 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1687 			i915_fbc_false_color_get, i915_fbc_false_color_set,
1688 			"%llu\n");
1689 
1690 static int i915_ips_status(struct seq_file *m, void *unused)
1691 {
1692 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1693 
1694 	if (!HAS_IPS(dev_priv)) {
1695 		seq_puts(m, "not supported\n");
1696 		return 0;
1697 	}
1698 
1699 	intel_runtime_pm_get(dev_priv);
1700 
1701 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1702 		   yesno(i915.enable_ips));
1703 
1704 	if (INTEL_GEN(dev_priv) >= 8) {
1705 		seq_puts(m, "Currently: unknown\n");
1706 	} else {
1707 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1708 			seq_puts(m, "Currently: enabled\n");
1709 		else
1710 			seq_puts(m, "Currently: disabled\n");
1711 	}
1712 
1713 	intel_runtime_pm_put(dev_priv);
1714 
1715 	return 0;
1716 }
1717 
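/*
 * i915_sr_status - report whether panel self-refresh is enabled.
 *
 * Each pre-gen9 platform exposes the enable bit in a different
 * register, hence the cascade below; gen9+ has no single global bit,
 * so the per-plane watermarks would need to be inspected instead, as
 * the empty branch notes. POWER_DOMAIN_INIT is grabbed so the display
 * power wells stay up across the register reads.
 */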
1718 static int i915_sr_status(struct seq_file *m, void *unused)
1719 {
1720 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1721 	bool sr_enabled = false;
1722 
1723 	intel_runtime_pm_get(dev_priv);
1724 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1725 
1726 	if (INTEL_GEN(dev_priv) >= 9)
1727 		/* no global SR status; inspect per-plane WM */;
1728 	else if (HAS_PCH_SPLIT(dev_priv))
1729 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1730 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1731 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1732 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1733 	else if (IS_I915GM(dev_priv))
1734 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1735 	else if (IS_PINEVIEW(dev_priv))
1736 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1737 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1738 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1739 
1740 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1741 	intel_runtime_pm_put(dev_priv);
1742 
1743 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1744 
1745 	return 0;
1746 }
1747 
1748 static int i915_emon_status(struct seq_file *m, void *unused)
1749 {
1750 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1751 	struct drm_device *dev = &dev_priv->drm;
1752 	unsigned long temp, chipset, gfx;
1753 	int ret;
1754 
1755 	if (!IS_GEN5(dev_priv))
1756 		return -ENODEV;
1757 
1758 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1759 	if (ret)
1760 		return ret;
1761 
1762 	temp = i915_mch_val(dev_priv);
1763 	chipset = i915_chipset_val(dev_priv);
1764 	gfx = i915_gfx_val(dev_priv);
1765 	mutex_unlock(&dev->struct_mutex);
1766 
1767 	seq_printf(m, "GMCH temp: %ld\n", temp);
1768 	seq_printf(m, "Chipset power: %ld\n", chipset);
1769 	seq_printf(m, "GFX power: %ld\n", gfx);
1770 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1771 
1772 	return 0;
1773 }
1774 
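/*
 * i915_ring_freq_table - dump the GPU/CPU/ring frequency mapping.
 *
 * For each GPU frequency step, the pcode mailbox
 * (GEN6_PCODE_READ_MIN_FREQ_TABLE) returns the matching effective CPU
 * and ring frequencies packed one byte each, in 100 MHz units: byte 0
 * is the CPU frequency, byte 1 the ring frequency. For example, an
 * ia_freq of 0x0d12 decodes as 0x12 * 100 = 1800 MHz effective CPU
 * and 0x0d * 100 = 1300 MHz effective ring. On gen9/CNL the RPS
 * softlimits are divided by GEN9_FREQ_SCALER first to get back to
 * 50 MHz units.
 */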
1775 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1776 {
1777 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1778 	int ret = 0;
1779 	int gpu_freq, ia_freq;
1780 	unsigned int max_gpu_freq, min_gpu_freq;
1781 
1782 	if (!HAS_LLC(dev_priv)) {
1783 		seq_puts(m, "unsupported on this chipset\n");
1784 		return 0;
1785 	}
1786 
1787 	intel_runtime_pm_get(dev_priv);
1788 
1789 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1790 	if (ret)
1791 		goto out;
1792 
1793 	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		/* Convert GT frequency to 50 MHz units */
1795 		min_gpu_freq =
1796 			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
1797 		max_gpu_freq =
1798 			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
1799 	} else {
1800 		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
1801 		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
1802 	}
1803 
1804 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1805 
1806 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1807 		ia_freq = gpu_freq;
1808 		sandybridge_pcode_read(dev_priv,
1809 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1810 				       &ia_freq);
1811 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1812 			   intel_gpu_freq(dev_priv, (gpu_freq *
1813 						     (IS_GEN9_BC(dev_priv) ||
1814 						      IS_CANNONLAKE(dev_priv) ?
1815 						      GEN9_FREQ_SCALER : 1))),
1816 			   ((ia_freq >> 0) & 0xff) * 100,
1817 			   ((ia_freq >> 8) & 0xff) * 100);
1818 	}
1819 
1820 	mutex_unlock(&dev_priv->rps.hw_lock);
1821 
1822 out:
1823 	intel_runtime_pm_put(dev_priv);
1824 	return ret;
1825 }
1826 
1827 static int i915_opregion(struct seq_file *m, void *unused)
1828 {
1829 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1830 	struct drm_device *dev = &dev_priv->drm;
1831 	struct intel_opregion *opregion = &dev_priv->opregion;
1832 	int ret;
1833 
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

	return 0;
1845 }
1846 
1847 static int i915_vbt(struct seq_file *m, void *unused)
1848 {
1849 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1850 
1851 	if (opregion->vbt)
1852 		seq_write(m, opregion->vbt, opregion->vbt_size);
1853 
1854 	return 0;
1855 }
1856 
1857 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1858 {
1859 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1860 	struct drm_device *dev = &dev_priv->drm;
1861 	struct intel_framebuffer *fbdev_fb = NULL;
1862 	struct drm_framebuffer *drm_fb;
1863 	int ret;
1864 
1865 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1866 	if (ret)
1867 		return ret;
1868 
1869 #ifdef CONFIG_DRM_FBDEV_EMULATION
1870 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1871 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1872 
1873 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1874 			   fbdev_fb->base.width,
1875 			   fbdev_fb->base.height,
1876 			   fbdev_fb->base.format->depth,
1877 			   fbdev_fb->base.format->cpp[0] * 8,
1878 			   fbdev_fb->base.modifier,
1879 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1880 		describe_obj(m, fbdev_fb->obj);
1881 		seq_putc(m, '\n');
1882 	}
1883 #endif
1884 
1885 	mutex_lock(&dev->mode_config.fb_lock);
1886 	drm_for_each_fb(drm_fb, dev) {
1887 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1888 		if (fb == fbdev_fb)
1889 			continue;
1890 
1891 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1892 			   fb->base.width,
1893 			   fb->base.height,
1894 			   fb->base.format->depth,
1895 			   fb->base.format->cpp[0] * 8,
1896 			   fb->base.modifier,
1897 			   drm_framebuffer_read_refcount(&fb->base));
1898 		describe_obj(m, fb->obj);
1899 		seq_putc(m, '\n');
1900 	}
1901 	mutex_unlock(&dev->mode_config.fb_lock);
1902 	mutex_unlock(&dev->struct_mutex);
1903 
1904 	return 0;
1905 }
1906 
1907 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1908 {
1909 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
1910 		   ring->space, ring->head, ring->tail);
1911 }
1912 
1913 static int i915_context_status(struct seq_file *m, void *unused)
1914 {
1915 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1916 	struct drm_device *dev = &dev_priv->drm;
1917 	struct intel_engine_cs *engine;
1918 	struct i915_gem_context *ctx;
1919 	enum intel_engine_id id;
1920 	int ret;
1921 
1922 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1923 	if (ret)
1924 		return ret;
1925 
1926 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1927 		seq_printf(m, "HW context %u ", ctx->hw_id);
1928 		if (ctx->pid) {
1929 			struct task_struct *task;
1930 
1931 			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1932 			if (task) {
1933 				seq_printf(m, "(%s [%d]) ",
1934 					   task->comm, task->pid);
1935 				put_task_struct(task);
1936 			}
1937 		} else if (IS_ERR(ctx->file_priv)) {
1938 			seq_puts(m, "(deleted) ");
1939 		} else {
1940 			seq_puts(m, "(kernel) ");
1941 		}
1942 
1943 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1944 		seq_putc(m, '\n');
1945 
1946 		for_each_engine(engine, dev_priv, id) {
1947 			struct intel_context *ce = &ctx->engine[engine->id];
1948 
1949 			seq_printf(m, "%s: ", engine->name);
1950 			seq_putc(m, ce->initialised ? 'I' : 'i');
1951 			if (ce->state)
1952 				describe_obj(m, ce->state->obj);
1953 			if (ce->ring)
1954 				describe_ctx_ring(m, ce->ring);
1955 			seq_putc(m, '\n');
1956 		}
1957 
1958 		seq_putc(m, '\n');
1959 	}
1960 
1961 	mutex_unlock(&dev->struct_mutex);
1962 
1963 	return 0;
1964 }
1965 
1966 static void i915_dump_lrc_obj(struct seq_file *m,
1967 			      struct i915_gem_context *ctx,
1968 			      struct intel_engine_cs *engine)
1969 {
1970 	struct i915_vma *vma = ctx->engine[engine->id].state;
1971 	struct page *page;
1972 	int j;
1973 
1974 	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
1975 
1976 	if (!vma) {
1977 		seq_puts(m, "\tFake context\n");
1978 		return;
1979 	}
1980 
1981 	if (vma->flags & I915_VMA_GLOBAL_BIND)
1982 		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
1983 			   i915_ggtt_offset(vma));
1984 
1985 	if (i915_gem_object_pin_pages(vma->obj)) {
1986 		seq_puts(m, "\tFailed to get pages for context object\n\n");
1987 		return;
1988 	}
1989 
1990 	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
1991 	if (page) {
1992 		u32 *reg_state = kmap_atomic(page);
1993 
		/* print four dwords per line across the 0x600-byte register state */
		for (j = 0; j < 0x600 / sizeof(u32); j += 4) {
1995 			seq_printf(m,
1996 				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1997 				   j * 4,
1998 				   reg_state[j], reg_state[j + 1],
1999 				   reg_state[j + 2], reg_state[j + 3]);
2000 		}
2001 		kunmap_atomic(reg_state);
2002 	}
2003 
2004 	i915_gem_object_unpin_pages(vma->obj);
2005 	seq_putc(m, '\n');
2006 }
2007 
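/*
 * i915_dump_lrc - dump the register-state page of every logical ring
 * context (execlists mode only).
 *
 * Walks all contexts under struct_mutex and prints the LRC_STATE_PN
 * page of each engine's context image, four dwords per line with the
 * byte offset in brackets.
 */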
2008 static int i915_dump_lrc(struct seq_file *m, void *unused)
2009 {
2010 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2011 	struct drm_device *dev = &dev_priv->drm;
2012 	struct intel_engine_cs *engine;
2013 	struct i915_gem_context *ctx;
2014 	enum intel_engine_id id;
2015 	int ret;
2016 
2017 	if (!i915.enable_execlists) {
		seq_puts(m, "Logical Ring Contexts are disabled\n");
2019 		return 0;
2020 	}
2021 
2022 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2023 	if (ret)
2024 		return ret;
2025 
2026 	list_for_each_entry(ctx, &dev_priv->contexts.list, link)
2027 		for_each_engine(engine, dev_priv, id)
2028 			i915_dump_lrc_obj(m, ctx, engine);
2029 
2030 	mutex_unlock(&dev->struct_mutex);
2031 
2032 	return 0;
2033 }
2034 
2035 static const char *swizzle_string(unsigned swizzle)
2036 {
2037 	switch (swizzle) {
2038 	case I915_BIT_6_SWIZZLE_NONE:
2039 		return "none";
2040 	case I915_BIT_6_SWIZZLE_9:
2041 		return "bit9";
2042 	case I915_BIT_6_SWIZZLE_9_10:
2043 		return "bit9/bit10";
2044 	case I915_BIT_6_SWIZZLE_9_11:
2045 		return "bit9/bit11";
2046 	case I915_BIT_6_SWIZZLE_9_10_11:
2047 		return "bit9/bit10/bit11";
2048 	case I915_BIT_6_SWIZZLE_9_17:
2049 		return "bit9/bit17";
2050 	case I915_BIT_6_SWIZZLE_9_10_17:
2051 		return "bit9/bit10/bit17";
2052 	case I915_BIT_6_SWIZZLE_UNKNOWN:
2053 		return "unknown";
2054 	}
2055 
2056 	return "bug";
2057 }
2058 
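/*
 * i915_swizzle_info - report the detected bit-6 swizzle modes and the
 * raw memory-channel configuration registers they were derived from
 * (DCC/DRB on gen3/4, MAD_DIMM/TILECTL/ARB_MODE on gen6+).
 */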
2059 static int i915_swizzle_info(struct seq_file *m, void *data)
2060 {
2061 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2062 
2063 	intel_runtime_pm_get(dev_priv);
2064 
2065 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2066 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2067 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2068 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2069 
2070 	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
2071 		seq_printf(m, "DDC = 0x%08x\n",
2072 			   I915_READ(DCC));
2073 		seq_printf(m, "DDC2 = 0x%08x\n",
2074 			   I915_READ(DCC2));
2075 		seq_printf(m, "C0DRB3 = 0x%04x\n",
2076 			   I915_READ16(C0DRB3));
2077 		seq_printf(m, "C1DRB3 = 0x%04x\n",
2078 			   I915_READ16(C1DRB3));
2079 	} else if (INTEL_GEN(dev_priv) >= 6) {
2080 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2081 			   I915_READ(MAD_DIMM_C0));
2082 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2083 			   I915_READ(MAD_DIMM_C1));
2084 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2085 			   I915_READ(MAD_DIMM_C2));
2086 		seq_printf(m, "TILECTL = 0x%08x\n",
2087 			   I915_READ(TILECTL));
2088 		if (INTEL_GEN(dev_priv) >= 8)
2089 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2090 				   I915_READ(GAMTARBMODE));
2091 		else
2092 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2093 				   I915_READ(ARB_MODE));
2094 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2095 			   I915_READ(DISP_ARB_CTL));
2096 	}
2097 
2098 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2099 		seq_puts(m, "L-shaped memory detected\n");
2100 
2101 	intel_runtime_pm_put(dev_priv);
2102 
2103 	return 0;
2104 }
2105 
2106 static int per_file_ctx(int id, void *ptr, void *data)
2107 {
2108 	struct i915_gem_context *ctx = ptr;
2109 	struct seq_file *m = data;
2110 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2111 
2112 	if (!ppgtt) {
2113 		seq_printf(m, "  no ppgtt for context %d\n",
2114 			   ctx->user_handle);
2115 		return 0;
2116 	}
2117 
2118 	if (i915_gem_context_is_default(ctx))
2119 		seq_puts(m, "  default context:\n");
2120 	else
2121 		seq_printf(m, "  context %d:\n", ctx->user_handle);
2122 	ppgtt->debug_dump(ppgtt, m);
2123 
2124 	return 0;
2125 }
2126 
2127 static void gen8_ppgtt_info(struct seq_file *m,
2128 			    struct drm_i915_private *dev_priv)
2129 {
2130 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2131 	struct intel_engine_cs *engine;
2132 	enum intel_engine_id id;
2133 	int i;
2134 
2135 	if (!ppgtt)
2136 		return;
2137 
2138 	for_each_engine(engine, dev_priv, id) {
2139 		seq_printf(m, "%s\n", engine->name);
2140 		for (i = 0; i < 4; i++) {
2141 			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2142 			pdp <<= 32;
2143 			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2144 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2145 		}
2146 	}
2147 }
2148 
2149 static void gen6_ppgtt_info(struct seq_file *m,
2150 			    struct drm_i915_private *dev_priv)
2151 {
2152 	struct intel_engine_cs *engine;
2153 	enum intel_engine_id id;
2154 
2155 	if (IS_GEN6(dev_priv))
2156 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2157 
2158 	for_each_engine(engine, dev_priv, id) {
2159 		seq_printf(m, "%s\n", engine->name);
2160 		if (IS_GEN7(dev_priv))
2161 			seq_printf(m, "GFX_MODE: 0x%08x\n",
2162 				   I915_READ(RING_MODE_GEN7(engine)));
2163 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2164 			   I915_READ(RING_PP_DIR_BASE(engine)));
2165 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2166 			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
2167 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2168 			   I915_READ(RING_PP_DIR_DCLV(engine)));
2169 	}
2170 	if (dev_priv->mm.aliasing_ppgtt) {
2171 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2172 
2173 		seq_puts(m, "aliasing PPGTT:\n");
2174 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2175 
2176 		ppgtt->debug_dump(ppgtt, m);
2177 	}
2178 
2179 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2180 }
2181 
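/*
 * i915_ppgtt_info - dump per-process GTT state, then each open DRM
 * file's contexts. Note the lock ordering: filelist_mutex is taken
 * before struct_mutex so the file list cannot change while the
 * contexts are being dumped.
 */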
2182 static int i915_ppgtt_info(struct seq_file *m, void *data)
2183 {
2184 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2185 	struct drm_device *dev = &dev_priv->drm;
2186 	struct drm_file *file;
2187 	int ret;
2188 
2189 	mutex_lock(&dev->filelist_mutex);
2190 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2191 	if (ret)
2192 		goto out_unlock;
2193 
2194 	intel_runtime_pm_get(dev_priv);
2195 
2196 	if (INTEL_GEN(dev_priv) >= 8)
2197 		gen8_ppgtt_info(m, dev_priv);
2198 	else if (INTEL_GEN(dev_priv) >= 6)
2199 		gen6_ppgtt_info(m, dev_priv);
2200 
2201 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2202 		struct drm_i915_file_private *file_priv = file->driver_priv;
2203 		struct task_struct *task;
2204 
2205 		task = get_pid_task(file->pid, PIDTYPE_PID);
2206 		if (!task) {
2207 			ret = -ESRCH;
2208 			goto out_rpm;
2209 		}
2210 		seq_printf(m, "\nproc: %s\n", task->comm);
2211 		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
2214 	}
2215 
2216 out_rpm:
2217 	intel_runtime_pm_put(dev_priv);
2218 	mutex_unlock(&dev->struct_mutex);
2219 out_unlock:
2220 	mutex_unlock(&dev->filelist_mutex);
2221 	return ret;
2222 }
2223 
2224 static int count_irq_waiters(struct drm_i915_private *i915)
2225 {
2226 	struct intel_engine_cs *engine;
2227 	enum intel_engine_id id;
2228 	int count = 0;
2229 
2230 	for_each_engine(engine, i915, id)
2231 		count += intel_engine_has_waiter(engine);
2232 
2233 	return count;
2234 }
2235 
2236 static const char *rps_power_to_str(unsigned int power)
2237 {
2238 	static const char * const strings[] = {
2239 		[LOW_POWER] = "low power",
2240 		[BETWEEN] = "mixed",
2241 		[HIGH_POWER] = "high power",
2242 	};
2243 
2244 	if (power >= ARRAY_SIZE(strings) || !strings[power])
2245 		return "unknown";
2246 
2247 	return strings[power];
2248 }
2249 
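/*
 * i915_rps_boost_info - summarize RPS (GPU frequency scaling) state:
 * current/min/max/boost frequencies, per-client boost counts, and the
 * autotuning evaluation-interval counters. The "Avg. up" figure is
 * simply 100 * rpup / rpupei, i.e. the up counter as a percentage of
 * its evaluation interval; "Avg. down" is the analogous ratio for the
 * down counters.
 */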
2250 static int i915_rps_boost_info(struct seq_file *m, void *data)
2251 {
2252 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2253 	struct drm_device *dev = &dev_priv->drm;
2254 	struct drm_file *file;
2255 
2256 	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
2257 	seq_printf(m, "GPU busy? %s [%d requests]\n",
2258 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2259 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2260 	seq_printf(m, "Boosts outstanding? %d\n",
2261 		   atomic_read(&dev_priv->rps.num_waiters));
2262 	seq_printf(m, "Frequency requested %d\n",
2263 		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
2264 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2265 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
2266 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
2267 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
2268 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
2269 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2270 		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
2271 		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
2272 		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
2273 
2274 	mutex_lock(&dev->filelist_mutex);
2275 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2276 		struct drm_i915_file_private *file_priv = file->driver_priv;
2277 		struct task_struct *task;
2278 
2279 		rcu_read_lock();
2280 		task = pid_task(file->pid, PIDTYPE_PID);
2281 		seq_printf(m, "%s [%d]: %d boosts\n",
2282 			   task ? task->comm : "<unknown>",
2283 			   task ? task->pid : -1,
2284 			   atomic_read(&file_priv->rps.boosts));
2285 		rcu_read_unlock();
2286 	}
2287 	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2288 		   atomic_read(&dev_priv->rps.boosts));
2289 	mutex_unlock(&dev->filelist_mutex);
2290 
2291 	if (INTEL_GEN(dev_priv) >= 6 &&
2292 	    dev_priv->rps.enabled &&
2293 	    dev_priv->gt.active_requests) {
2294 		u32 rpup, rpupei;
2295 		u32 rpdown, rpdownei;
2296 
2297 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2298 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2299 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2300 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2301 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2302 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2303 
2304 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2305 			   rps_power_to_str(dev_priv->rps.power));
2306 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2307 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
2308 			   dev_priv->rps.up_threshold);
2309 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2310 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2311 			   dev_priv->rps.down_threshold);
2312 	} else {
2313 		seq_puts(m, "\nRPS Autotuning inactive\n");
2314 	}
2315 
2316 	return 0;
2317 }
2318 
2319 static int i915_llc(struct seq_file *m, void *data)
2320 {
2321 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2322 	const bool edram = INTEL_GEN(dev_priv) > 8;
2323 
2324 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2325 	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2326 		   intel_uncore_edram_size(dev_priv)/1024/1024);
2327 
2328 	return 0;
2329 }
2330 
2331 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2332 {
2333 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2334 	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
2335 
2336 	if (!HAS_HUC_UCODE(dev_priv))
2337 		return 0;
2338 
2339 	seq_puts(m, "HuC firmware status:\n");
2340 	seq_printf(m, "\tpath: %s\n", huc_fw->path);
2341 	seq_printf(m, "\tfetch: %s\n",
2342 		intel_uc_fw_status_repr(huc_fw->fetch_status));
2343 	seq_printf(m, "\tload: %s\n",
2344 		intel_uc_fw_status_repr(huc_fw->load_status));
2345 	seq_printf(m, "\tversion wanted: %d.%d\n",
2346 		huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
2347 	seq_printf(m, "\tversion found: %d.%d\n",
2348 		huc_fw->major_ver_found, huc_fw->minor_ver_found);
2349 	seq_printf(m, "\theader: offset is %d; size = %d\n",
2350 		huc_fw->header_offset, huc_fw->header_size);
2351 	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
2352 		huc_fw->ucode_offset, huc_fw->ucode_size);
2353 	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
2354 		huc_fw->rsa_offset, huc_fw->rsa_size);
2355 
2356 	intel_runtime_pm_get(dev_priv);
2357 	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2358 	intel_runtime_pm_put(dev_priv);
2359 
2360 	return 0;
2361 }
2362 
2363 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2364 {
2365 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2366 	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
2367 	u32 tmp, i;
2368 
2369 	if (!HAS_GUC_UCODE(dev_priv))
2370 		return 0;
2371 
2372 	seq_printf(m, "GuC firmware status:\n");
2373 	seq_printf(m, "\tpath: %s\n",
2374 		guc_fw->path);
2375 	seq_printf(m, "\tfetch: %s\n",
2376 		intel_uc_fw_status_repr(guc_fw->fetch_status));
2377 	seq_printf(m, "\tload: %s\n",
2378 		intel_uc_fw_status_repr(guc_fw->load_status));
2379 	seq_printf(m, "\tversion wanted: %d.%d\n",
2380 		guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
2381 	seq_printf(m, "\tversion found: %d.%d\n",
2382 		guc_fw->major_ver_found, guc_fw->minor_ver_found);
2383 	seq_printf(m, "\theader: offset is %d; size = %d\n",
2384 		guc_fw->header_offset, guc_fw->header_size);
2385 	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
2386 		guc_fw->ucode_offset, guc_fw->ucode_size);
2387 	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
2388 		guc_fw->rsa_offset, guc_fw->rsa_size);
2389 
2390 	intel_runtime_pm_get(dev_priv);
2391 
2392 	tmp = I915_READ(GUC_STATUS);
2393 
2394 	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2395 	seq_printf(m, "\tBootrom status = 0x%x\n",
2396 		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2397 	seq_printf(m, "\tuKernel status = 0x%x\n",
2398 		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2399 	seq_printf(m, "\tMIA Core status = 0x%x\n",
2400 		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2401 	seq_puts(m, "\nScratch registers:\n");
2402 	for (i = 0; i < 16; i++)
2403 		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2404 
2405 	intel_runtime_pm_put(dev_priv);
2406 
2407 	return 0;
2408 }
2409 
2410 static void i915_guc_log_info(struct seq_file *m,
2411 			      struct drm_i915_private *dev_priv)
2412 {
2413 	struct intel_guc *guc = &dev_priv->guc;
2414 
2415 	seq_puts(m, "\nGuC logging stats:\n");
2416 
2417 	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
2418 		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
2419 		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
2420 
2421 	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
2422 		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
2423 		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
2424 
2425 	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
2426 		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
2427 		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
2428 
2429 	seq_printf(m, "\tTotal flush interrupt count: %u\n",
2430 		   guc->log.flush_interrupt_count);
2431 
2432 	seq_printf(m, "\tCapture miss count: %u\n",
2433 		   guc->log.capture_miss_count);
2434 }
2435 
2436 static void i915_guc_client_info(struct seq_file *m,
2437 				 struct drm_i915_private *dev_priv,
2438 				 struct i915_guc_client *client)
2439 {
2440 	struct intel_engine_cs *engine;
2441 	enum intel_engine_id id;
2442 	uint64_t tot = 0;
2443 
2444 	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2445 		client->priority, client->stage_id, client->proc_desc_offset);
2446 	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
2447 		client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
2448 	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2449 		client->wq_size, client->wq_offset, client->wq_tail);
2450 
2451 	seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
2452 
2453 	for_each_engine(engine, dev_priv, id) {
2454 		u64 submissions = client->submissions[id];
2455 		tot += submissions;
2456 		seq_printf(m, "\tSubmissions: %llu %s\n",
2457 				submissions, engine->name);
2458 	}
2459 	seq_printf(m, "\tTotal: %llu\n", tot);
2460 }
2461 
2462 static bool check_guc_submission(struct seq_file *m)
2463 {
2464 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2465 	const struct intel_guc *guc = &dev_priv->guc;
2466 
2467 	if (!guc->execbuf_client) {
2468 		seq_printf(m, "GuC submission %s\n",
2469 			   HAS_GUC_SCHED(dev_priv) ?
2470 			   "disabled" :
2471 			   "not supported");
2472 		return false;
2473 	}
2474 
2475 	return true;
2476 }
2477 
2478 static int i915_guc_info(struct seq_file *m, void *data)
2479 {
2480 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2481 	const struct intel_guc *guc = &dev_priv->guc;
2482 
2483 	if (!check_guc_submission(m))
2484 		return 0;
2485 
2486 	seq_printf(m, "Doorbell map:\n");
2487 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2488 	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
2489 
2490 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2491 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2492 
2493 	i915_guc_log_info(m, dev_priv);
2494 
2495 	/* Add more as required ... */
2496 
2497 	return 0;
2498 }
2499 
2500 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2501 {
2502 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2503 	const struct intel_guc *guc = &dev_priv->guc;
2504 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2505 	struct i915_guc_client *client = guc->execbuf_client;
2506 	unsigned int tmp;
2507 	int index;
2508 
2509 	if (!check_guc_submission(m))
2510 		return 0;
2511 
2512 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2513 		struct intel_engine_cs *engine;
2514 
2515 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2516 			continue;
2517 
2518 		seq_printf(m, "GuC stage descriptor %u:\n", index);
2519 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2520 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2521 		seq_printf(m, "\tPriority: %d\n", desc->priority);
2522 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2523 		seq_printf(m, "\tEngines used: 0x%x\n",
2524 			   desc->engines_used);
2525 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2526 			   desc->db_trigger_phy,
2527 			   desc->db_trigger_cpu,
2528 			   desc->db_trigger_uk);
2529 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
2530 			   desc->process_desc);
2531 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2532 			   desc->wq_addr, desc->wq_size);
2533 		seq_putc(m, '\n');
2534 
2535 		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2536 			u32 guc_engine_id = engine->guc_id;
2537 			struct guc_execlist_context *lrc =
2538 						&desc->lrc[guc_engine_id];
2539 
2540 			seq_printf(m, "\t%s LRC:\n", engine->name);
2541 			seq_printf(m, "\t\tContext desc: 0x%x\n",
2542 				   lrc->context_desc);
2543 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2544 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2545 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2546 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2547 			seq_putc(m, '\n');
2548 		}
2549 	}
2550 
2551 	return 0;
2552 }
2553 
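/*
 * i915_guc_log_dump - hexdump the GuC log buffer, or the load-error
 * log when the node's info_ent->data is set. The backing object is
 * mapped write-combined and printed four dwords per line.
 */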
2554 static int i915_guc_log_dump(struct seq_file *m, void *data)
2555 {
2556 	struct drm_info_node *node = m->private;
2557 	struct drm_i915_private *dev_priv = node_to_i915(node);
2558 	bool dump_load_err = !!node->info_ent->data;
2559 	struct drm_i915_gem_object *obj = NULL;
2560 	u32 *log;
2561 	int i = 0;
2562 
2563 	if (dump_load_err)
2564 		obj = dev_priv->guc.load_err_log;
2565 	else if (dev_priv->guc.log.vma)
2566 		obj = dev_priv->guc.log.vma->obj;
2567 
2568 	if (!obj)
2569 		return 0;
2570 
2571 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2572 	if (IS_ERR(log)) {
2573 		DRM_DEBUG("Failed to pin object\n");
2574 		seq_puts(m, "(log data unaccessible)\n");
2575 		return PTR_ERR(log);
2576 	}
2577 
2578 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2579 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2580 			   *(log + i), *(log + i + 1),
2581 			   *(log + i + 2), *(log + i + 3));
2582 
2583 	seq_putc(m, '\n');
2584 
2585 	i915_gem_object_unpin_map(obj);
2586 
2587 	return 0;
2588 }
2589 
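/*
 * Reading the guc_log_control attribute returns the current
 * i915.guc_log_level; writing it forwards the requested verbosity to
 * i915_guc_log_control() under struct_mutex and a runtime-PM wakeref.
 * Example (node path assumed, varies with the DRM minor):
 *
 *	# echo 2 > /sys/kernel/debug/dri/0/i915_guc_log_control
 */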
2590 static int i915_guc_log_control_get(void *data, u64 *val)
2591 {
2592 	struct drm_i915_private *dev_priv = data;
2593 
2594 	if (!dev_priv->guc.log.vma)
2595 		return -EINVAL;
2596 
2597 	*val = i915.guc_log_level;
2598 
2599 	return 0;
2600 }
2601 
2602 static int i915_guc_log_control_set(void *data, u64 val)
2603 {
2604 	struct drm_i915_private *dev_priv = data;
2605 	int ret;
2606 
2607 	if (!dev_priv->guc.log.vma)
2608 		return -EINVAL;
2609 
2610 	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
2611 	if (ret)
2612 		return ret;
2613 
2614 	intel_runtime_pm_get(dev_priv);
2615 	ret = i915_guc_log_control(dev_priv, val);
2616 	intel_runtime_pm_put(dev_priv);
2617 
2618 	mutex_unlock(&dev_priv->drm.struct_mutex);
2619 	return ret;
2620 }
2621 
2622 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
2623 			i915_guc_log_control_get, i915_guc_log_control_set,
2624 			"%lld\n");
2625 
2626 static const char *psr2_live_status(u32 val)
2627 {
2628 	static const char * const live_status[] = {
2629 		"IDLE",
2630 		"CAPTURE",
2631 		"CAPTURE_FS",
2632 		"SLEEP",
2633 		"BUFON_FW",
2634 		"ML_UP",
2635 		"SU_STANDBY",
2636 		"FAST_SLEEP",
2637 		"DEEP_SLEEP",
2638 		"BUF_ON",
2639 		"TG_ON"
2640 	};
2641 
2642 	val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2643 	if (val < ARRAY_SIZE(live_status))
2644 		return live_status[val];
2645 
2646 	return "unknown";
2647 }
2648 
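/*
 * i915_edp_psr_status - report Panel Self Refresh state. On DDI
 * platforms the single EDP_PSR(2)_CTL enable bit is checked; on
 * VLV/CHV each pipe's PSR state machine is sampled instead, taking
 * the transcoder power domain only if it is already enabled.
 */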
2649 static int i915_edp_psr_status(struct seq_file *m, void *data)
2650 {
2651 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2652 	u32 psrperf = 0;
	u32 stat[3] = {};
2654 	enum pipe pipe;
2655 	bool enabled = false;
2656 
2657 	if (!HAS_PSR(dev_priv)) {
2658 		seq_puts(m, "PSR not supported\n");
2659 		return 0;
2660 	}
2661 
2662 	intel_runtime_pm_get(dev_priv);
2663 
2664 	mutex_lock(&dev_priv->psr.lock);
2665 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2666 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2667 	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2668 	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2669 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2670 		   dev_priv->psr.busy_frontbuffer_bits);
2671 	seq_printf(m, "Re-enable work scheduled: %s\n",
2672 		   yesno(work_busy(&dev_priv->psr.work.work)));
2673 
2674 	if (HAS_DDI(dev_priv)) {
2675 		if (dev_priv->psr.psr2_support)
2676 			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2677 		else
2678 			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2679 	} else {
2680 		for_each_pipe(dev_priv, pipe) {
2681 			enum transcoder cpu_transcoder =
2682 				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
2683 			enum intel_display_power_domain power_domain;
2684 
2685 			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
2686 			if (!intel_display_power_get_if_enabled(dev_priv,
2687 								power_domain))
2688 				continue;
2689 
2690 			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2691 				VLV_EDP_PSR_CURR_STATE_MASK;
2692 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2693 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2694 				enabled = true;
2695 
2696 			intel_display_power_put(dev_priv, power_domain);
2697 		}
2698 	}
2699 
2700 	seq_printf(m, "Main link in standby mode: %s\n",
2701 		   yesno(dev_priv->psr.link_standby));
2702 
2703 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2704 
2705 	if (!HAS_DDI(dev_priv))
2706 		for_each_pipe(dev_priv, pipe) {
2707 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2708 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2709 				seq_printf(m, " pipe %c", pipe_name(pipe));
2710 		}
2711 	seq_puts(m, "\n");
2712 
	/*
	 * VLV/CHV PSR has no kind of performance counter.
	 * SKL+ perf counter is reset to 0 every time DC state is entered.
	 */
2717 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2718 		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2719 			EDP_PSR_PERF_CNT_MASK;
2720 
2721 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2722 	}
2723 	if (dev_priv->psr.psr2_support) {
2724 		u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);
2725 
2726 		seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
2727 			   psr2, psr2_live_status(psr2));
2728 	}
2729 	mutex_unlock(&dev_priv->psr.lock);
2730 
2731 	intel_runtime_pm_put(dev_priv);
2732 	return 0;
2733 }
2734 
2735 static int i915_sink_crc(struct seq_file *m, void *data)
2736 {
2737 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2738 	struct drm_device *dev = &dev_priv->drm;
2739 	struct intel_connector *connector;
2740 	struct drm_connector_list_iter conn_iter;
2741 	struct intel_dp *intel_dp = NULL;
2742 	int ret;
2743 	u8 crc[6];
2744 
2745 	drm_modeset_lock_all(dev);
2746 	drm_connector_list_iter_begin(dev, &conn_iter);
2747 	for_each_intel_connector_iter(connector, &conn_iter) {
2748 		struct drm_crtc *crtc;
2749 
2750 		if (!connector->base.state->best_encoder)
2751 			continue;
2752 
2753 		crtc = connector->base.state->crtc;
2754 		if (!crtc->state->active)
2755 			continue;
2756 
2757 		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
2758 			continue;
2759 
2760 		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
2761 
2762 		ret = intel_dp_sink_crc(intel_dp, crc);
2763 		if (ret)
2764 			goto out;
2765 
2766 		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2767 			   crc[0], crc[1], crc[2],
2768 			   crc[3], crc[4], crc[5]);
2769 		goto out;
2770 	}
2771 	ret = -ENODEV;
2772 out:
2773 	drm_connector_list_iter_end(&conn_iter);
2774 	drm_modeset_unlock_all(dev);
2775 	return ret;
2776 }
2777 
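/*
 * i915_energy_uJ - report accumulated GPU energy use in microjoules.
 *
 * The RAPL energy-status unit is 1/2^ESU joules, where ESU is bits
 * 12:8 of MSR_RAPL_POWER_UNIT; the raw MCH_SECP_NRG_STTS count is
 * converted with (1000000 * count) >> units. E.g. with the common
 * ESU of 14, one count is 1000000 >> 14, roughly 61 uJ.
 */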
2778 static int i915_energy_uJ(struct seq_file *m, void *data)
2779 {
2780 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2781 	unsigned long long power;
2782 	u32 units;
2783 
2784 	if (INTEL_GEN(dev_priv) < 6)
2785 		return -ENODEV;
2786 
2787 	intel_runtime_pm_get(dev_priv);
2788 
2789 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2790 		intel_runtime_pm_put(dev_priv);
2791 		return -ENODEV;
2792 	}
2793 
2794 	units = (power & 0x1f00) >> 8;
2795 	power = I915_READ(MCH_SECP_NRG_STTS);
2796 	power = (1000000 * power) >> units; /* convert to uJ */
2797 
2798 	intel_runtime_pm_put(dev_priv);
2799 
2800 	seq_printf(m, "%llu", power);
2801 
2802 	return 0;
2803 }
2804 
2805 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2806 {
2807 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2808 	struct pci_dev *pdev = dev_priv->drm.pdev;
2809 
2810 	if (!HAS_RUNTIME_PM(dev_priv))
2811 		seq_puts(m, "Runtime power management not supported\n");
2812 
2813 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2814 	seq_printf(m, "IRQs disabled: %s\n",
2815 		   yesno(!intel_irqs_enabled(dev_priv)));
2816 #ifdef CONFIG_PM
2817 	seq_printf(m, "Usage count: %d\n",
2818 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2819 #else
2820 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2821 #endif
2822 	seq_printf(m, "PCI device power state: %s [%d]\n",
2823 		   pci_power_name(pdev->current_state),
2824 		   pdev->current_state);
2825 
2826 	return 0;
2827 }
2828 
2829 static int i915_power_domain_info(struct seq_file *m, void *unused)
2830 {
2831 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2832 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2833 	int i;
2834 
2835 	mutex_lock(&power_domains->lock);
2836 
2837 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2838 	for (i = 0; i < power_domains->power_well_count; i++) {
2839 		struct i915_power_well *power_well;
2840 		enum intel_display_power_domain power_domain;
2841 
2842 		power_well = &power_domains->power_wells[i];
2843 		seq_printf(m, "%-25s %d\n", power_well->name,
2844 			   power_well->count);
2845 
2846 		for_each_power_domain(power_domain, power_well->domains)
2847 			seq_printf(m, "  %-23s %d\n",
2848 				 intel_display_power_domain_str(power_domain),
2849 				 power_domains->domain_use_count[power_domain]);
2850 	}
2851 
2852 	mutex_unlock(&power_domains->lock);
2853 
2854 	return 0;
2855 }
2856 
2857 static int i915_dmc_info(struct seq_file *m, void *unused)
2858 {
2859 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2860 	struct intel_csr *csr;
2861 
2862 	if (!HAS_CSR(dev_priv)) {
2863 		seq_puts(m, "not supported\n");
2864 		return 0;
2865 	}
2866 
2867 	csr = &dev_priv->csr;
2868 
2869 	intel_runtime_pm_get(dev_priv);
2870 
2871 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2872 	seq_printf(m, "path: %s\n", csr->fw_path);
2873 
2874 	if (!csr->dmc_payload)
2875 		goto out;
2876 
2877 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2878 		   CSR_VERSION_MINOR(csr->version));
2879 
2880 	if (IS_KABYLAKE(dev_priv) ||
2881 	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
2882 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2883 			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
2884 		seq_printf(m, "DC5 -> DC6 count: %d\n",
2885 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2886 	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
2887 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2888 			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
2889 	}
2890 
2891 out:
2892 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2893 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2894 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2895 
2896 	intel_runtime_pm_put(dev_priv);
2897 
2898 	return 0;
2899 }
2900 
2901 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2902 				 struct drm_display_mode *mode)
2903 {
2904 	int i;
2905 
2906 	for (i = 0; i < tabs; i++)
2907 		seq_putc(m, '\t');
2908 
2909 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2910 		   mode->base.id, mode->name,
2911 		   mode->vrefresh, mode->clock,
2912 		   mode->hdisplay, mode->hsync_start,
2913 		   mode->hsync_end, mode->htotal,
2914 		   mode->vdisplay, mode->vsync_start,
2915 		   mode->vsync_end, mode->vtotal,
2916 		   mode->type, mode->flags);
2917 }
2918 
2919 static void intel_encoder_info(struct seq_file *m,
2920 			       struct intel_crtc *intel_crtc,
2921 			       struct intel_encoder *intel_encoder)
2922 {
2923 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2924 	struct drm_device *dev = &dev_priv->drm;
2925 	struct drm_crtc *crtc = &intel_crtc->base;
2926 	struct intel_connector *intel_connector;
2927 	struct drm_encoder *encoder;
2928 
2929 	encoder = &intel_encoder->base;
2930 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2931 		   encoder->base.id, encoder->name);
2932 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2933 		struct drm_connector *connector = &intel_connector->base;
2934 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2935 			   connector->base.id,
2936 			   connector->name,
2937 			   drm_get_connector_status_name(connector->status));
2938 		if (connector->status == connector_status_connected) {
2939 			struct drm_display_mode *mode = &crtc->mode;
2940 			seq_printf(m, ", mode:\n");
2941 			intel_seq_print_mode(m, 2, mode);
2942 		} else {
2943 			seq_putc(m, '\n');
2944 		}
2945 	}
2946 }
2947 
2948 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2949 {
2950 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2951 	struct drm_device *dev = &dev_priv->drm;
2952 	struct drm_crtc *crtc = &intel_crtc->base;
2953 	struct intel_encoder *intel_encoder;
2954 	struct drm_plane_state *plane_state = crtc->primary->state;
2955 	struct drm_framebuffer *fb = plane_state->fb;
2956 
2957 	if (fb)
2958 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2959 			   fb->base.id, plane_state->src_x >> 16,
2960 			   plane_state->src_y >> 16, fb->width, fb->height);
2961 	else
2962 		seq_puts(m, "\tprimary plane disabled\n");
2963 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2964 		intel_encoder_info(m, intel_crtc, intel_encoder);
2965 }
2966 
2967 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2968 {
2969 	struct drm_display_mode *mode = panel->fixed_mode;
2970 
2971 	seq_printf(m, "\tfixed mode:\n");
2972 	intel_seq_print_mode(m, 2, mode);
2973 }
2974 
2975 static void intel_dp_info(struct seq_file *m,
2976 			  struct intel_connector *intel_connector)
2977 {
2978 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2979 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2980 
2981 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2982 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2983 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2984 		intel_panel_info(m, &intel_connector->panel);
2985 
2986 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2987 				&intel_dp->aux);
2988 }
2989 
2990 static void intel_dp_mst_info(struct seq_file *m,
2991 			  struct intel_connector *intel_connector)
2992 {
2993 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2994 	struct intel_dp_mst_encoder *intel_mst =
2995 		enc_to_mst(&intel_encoder->base);
2996 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
2997 	struct intel_dp *intel_dp = &intel_dig_port->dp;
2998 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2999 					intel_connector->port);
3000 
3001 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3002 }
3003 
3004 static void intel_hdmi_info(struct seq_file *m,
3005 			    struct intel_connector *intel_connector)
3006 {
3007 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3008 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3009 
3010 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
3011 }
3012 
3013 static void intel_lvds_info(struct seq_file *m,
3014 			    struct intel_connector *intel_connector)
3015 {
3016 	intel_panel_info(m, &intel_connector->panel);
3017 }
3018 
3019 static void intel_connector_info(struct seq_file *m,
3020 				 struct drm_connector *connector)
3021 {
3022 	struct intel_connector *intel_connector = to_intel_connector(connector);
3023 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3024 	struct drm_display_mode *mode;
3025 
3026 	seq_printf(m, "connector %d: type %s, status: %s\n",
3027 		   connector->base.id, connector->name,
3028 		   drm_get_connector_status_name(connector->status));
3029 	if (connector->status == connector_status_connected) {
3030 		seq_printf(m, "\tname: %s\n", connector->display_info.name);
3031 		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3032 			   connector->display_info.width_mm,
3033 			   connector->display_info.height_mm);
3034 		seq_printf(m, "\tsubpixel order: %s\n",
3035 			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3036 		seq_printf(m, "\tCEA rev: %d\n",
3037 			   connector->display_info.cea_rev);
3038 	}
3039 
3040 	if (!intel_encoder)
3041 		return;
3042 
3043 	switch (connector->connector_type) {
3044 	case DRM_MODE_CONNECTOR_DisplayPort:
3045 	case DRM_MODE_CONNECTOR_eDP:
3046 		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3047 			intel_dp_mst_info(m, intel_connector);
3048 		else
3049 			intel_dp_info(m, intel_connector);
3050 		break;
3051 	case DRM_MODE_CONNECTOR_LVDS:
3052 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3053 			intel_lvds_info(m, intel_connector);
3054 		break;
3055 	case DRM_MODE_CONNECTOR_HDMIA:
3056 		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3057 		    intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
3058 			intel_hdmi_info(m, intel_connector);
3059 		break;
3060 	default:
3061 		break;
3062 	}
3063 
3064 	seq_printf(m, "\tmodes:\n");
3065 	list_for_each_entry(mode, &connector->modes, head)
3066 		intel_seq_print_mode(m, 2, mode);
3067 }
3068 
3069 static const char *plane_type(enum drm_plane_type type)
3070 {
3071 	switch (type) {
3072 	case DRM_PLANE_TYPE_OVERLAY:
3073 		return "OVL";
3074 	case DRM_PLANE_TYPE_PRIMARY:
3075 		return "PRI";
3076 	case DRM_PLANE_TYPE_CURSOR:
3077 		return "CUR";
3078 	/*
3079 	 * Deliberately omitting default: to generate compiler warnings
3080 	 * when a new drm_plane_type gets added.
3081 	 */
3082 	}
3083 
3084 	return "unknown";
3085 }
3086 
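/*
 * plane_rotation - format a rotation bitmask for printing. Note the
 * result lives in a static buffer, so the returned pointer is only
 * valid until the next call and the helper is not safe for concurrent
 * callers; that is fine for single-threaded debugfs output.
 */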
3087 static const char *plane_rotation(unsigned int rotation)
3088 {
3089 	static char buf[48];
3090 	/*
3091 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
3092 	 * will print them all to visualize if the values are misused
3093 	 */
3094 	snprintf(buf, sizeof(buf),
3095 		 "%s%s%s%s%s%s(0x%08x)",
3096 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3097 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3098 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3099 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3100 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3101 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3102 		 rotation);
3103 
3104 	return buf;
3105 }
3106 
3107 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3108 {
3109 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3110 	struct drm_device *dev = &dev_priv->drm;
3111 	struct intel_plane *intel_plane;
3112 
3113 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3114 		struct drm_plane_state *state;
3115 		struct drm_plane *plane = &intel_plane->base;
3116 		struct drm_format_name_buf format_name;
3117 
3118 		if (!plane->state) {
3119 			seq_puts(m, "plane->state is NULL!\n");
3120 			continue;
3121 		}
3122 
3123 		state = plane->state;
3124 
3125 		if (state->fb) {
3126 			drm_get_format_name(state->fb->format->format,
3127 					    &format_name);
3128 		} else {
3129 			sprintf(format_name.str, "N/A");
3130 		}
3131 
3132 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3133 			   plane->base.id,
3134 			   plane_type(intel_plane->base.type),
3135 			   state->crtc_x, state->crtc_y,
3136 			   state->crtc_w, state->crtc_h,
3137 			   (state->src_x >> 16),
3138 			   ((state->src_x & 0xffff) * 15625) >> 10,
3139 			   (state->src_y >> 16),
3140 			   ((state->src_y & 0xffff) * 15625) >> 10,
3141 			   (state->src_w >> 16),
3142 			   ((state->src_w & 0xffff) * 15625) >> 10,
3143 			   (state->src_h >> 16),
3144 			   ((state->src_h & 0xffff) * 15625) >> 10,
3145 			   format_name.str,
3146 			   plane_rotation(state->rotation));
3147 	}
3148 }
3149 
3150 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3151 {
3152 	struct intel_crtc_state *pipe_config;
3153 	int num_scalers = intel_crtc->num_scalers;
3154 	int i;
3155 
3156 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3157 
	/* Not all platforms have a scaler */
3159 	if (num_scalers) {
3160 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3161 			   num_scalers,
3162 			   pipe_config->scaler_state.scaler_users,
3163 			   pipe_config->scaler_state.scaler_id);
3164 
3165 		for (i = 0; i < num_scalers; i++) {
3166 			struct intel_scaler *sc =
3167 					&pipe_config->scaler_state.scalers[i];
3168 
3169 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3170 				   i, yesno(sc->in_use), sc->mode);
3171 		}
3172 		seq_puts(m, "\n");
3173 	} else {
3174 		seq_puts(m, "\tNo scalers available on this platform\n");
3175 	}
3176 }
3177 
3178 static int i915_display_info(struct seq_file *m, void *unused)
3179 {
3180 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3181 	struct drm_device *dev = &dev_priv->drm;
3182 	struct intel_crtc *crtc;
3183 	struct drm_connector *connector;
3184 	struct drm_connector_list_iter conn_iter;
3185 
3186 	intel_runtime_pm_get(dev_priv);
3187 	seq_printf(m, "CRTC info\n");
3188 	seq_printf(m, "---------\n");
3189 	for_each_intel_crtc(dev, crtc) {
3190 		struct intel_crtc_state *pipe_config;
3191 
3192 		drm_modeset_lock(&crtc->base.mutex, NULL);
3193 		pipe_config = to_intel_crtc_state(crtc->base.state);
3194 
3195 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3196 			   crtc->base.base.id, pipe_name(crtc->pipe),
3197 			   yesno(pipe_config->base.active),
3198 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3199 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3200 
3201 		if (pipe_config->base.active) {
3202 			struct intel_plane *cursor =
3203 				to_intel_plane(crtc->base.cursor);
3204 
3205 			intel_crtc_info(m, crtc);
3206 
3207 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3208 				   yesno(cursor->base.state->visible),
3209 				   cursor->base.state->crtc_x,
3210 				   cursor->base.state->crtc_y,
3211 				   cursor->base.state->crtc_w,
3212 				   cursor->base.state->crtc_h,
3213 				   cursor->cursor.base);
3214 			intel_scaler_info(m, crtc);
3215 			intel_plane_info(m, crtc);
3216 		}
3217 
3218 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3219 			   yesno(!crtc->cpu_fifo_underrun_disabled),
3220 			   yesno(!crtc->pch_fifo_underrun_disabled));
3221 		drm_modeset_unlock(&crtc->base.mutex);
3222 	}
3223 
3224 	seq_printf(m, "\n");
3225 	seq_printf(m, "Connector info\n");
3226 	seq_printf(m, "--------------\n");
3227 	mutex_lock(&dev->mode_config.mutex);
3228 	drm_connector_list_iter_begin(dev, &conn_iter);
3229 	drm_for_each_connector_iter(connector, &conn_iter)
3230 		intel_connector_info(m, connector);
3231 	drm_connector_list_iter_end(&conn_iter);
3232 	mutex_unlock(&dev->mode_config.mutex);
3233 
3234 	intel_runtime_pm_put(dev_priv);
3235 
3236 	return 0;
3237 }
3238 
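/*
 * i915_engine_info - dump per-engine state: seqnos, hangcheck status,
 * first/last/active requests, ring registers, and (with execlists)
 * the context-status buffer. The CSB read/write pointers wrap modulo
 * GEN8_CSB_ENTRIES, which is why the dump below normalizes them
 * before walking the outstanding entries.
 */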
3239 static int i915_engine_info(struct seq_file *m, void *unused)
3240 {
3241 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3242 	struct i915_gpu_error *error = &dev_priv->gpu_error;
3243 	struct intel_engine_cs *engine;
3244 	enum intel_engine_id id;
3245 
3246 	intel_runtime_pm_get(dev_priv);
3247 
3248 	seq_printf(m, "GT awake? %s\n",
3249 		   yesno(dev_priv->gt.awake));
3250 	seq_printf(m, "Global active requests: %d\n",
3251 		   dev_priv->gt.active_requests);
3252 
3253 	for_each_engine(engine, dev_priv, id) {
3254 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
3255 		struct drm_i915_gem_request *rq;
3256 		struct rb_node *rb;
3257 		u64 addr;
3258 
3259 		seq_printf(m, "%s\n", engine->name);
3260 		seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
3261 			   intel_engine_get_seqno(engine),
3262 			   intel_engine_last_submit(engine),
3263 			   engine->hangcheck.seqno,
3264 			   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
3265 			   engine->timeline->inflight_seqnos);
3266 		seq_printf(m, "\tReset count: %d\n",
3267 			   i915_reset_engine_count(error, engine));
3268 
3269 		rcu_read_lock();
3270 
3271 		seq_printf(m, "\tRequests:\n");
3272 
3273 		rq = list_first_entry(&engine->timeline->requests,
3274 				      struct drm_i915_gem_request, link);
3275 		if (&rq->link != &engine->timeline->requests)
3276 			print_request(m, rq, "\t\tfirst  ");
3277 
3278 		rq = list_last_entry(&engine->timeline->requests,
3279 				     struct drm_i915_gem_request, link);
3280 		if (&rq->link != &engine->timeline->requests)
3281 			print_request(m, rq, "\t\tlast   ");
3282 
3283 		rq = i915_gem_find_active_request(engine);
3284 		if (rq) {
3285 			print_request(m, rq, "\t\tactive ");
3286 			seq_printf(m,
3287 				   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
3288 				   rq->head, rq->postfix, rq->tail,
3289 				   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
3290 				   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
3291 		}
3292 
3293 		seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
3294 			   I915_READ(RING_START(engine->mmio_base)),
3295 			   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
3296 		seq_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
3297 			   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
3298 			   rq ? rq->ring->head : 0);
3299 		seq_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
3300 			   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
3301 			   rq ? rq->ring->tail : 0);
3302 		seq_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
3303 			   I915_READ(RING_CTL(engine->mmio_base)),
3304 			   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
3305 
3306 		rcu_read_unlock();
3307 
3308 		addr = intel_engine_get_active_head(engine);
3309 		seq_printf(m, "\tACTHD:  0x%08x_%08x\n",
3310 			   upper_32_bits(addr), lower_32_bits(addr));
3311 		addr = intel_engine_get_last_batch_head(engine);
3312 		seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
3313 			   upper_32_bits(addr), lower_32_bits(addr));
3314 
3315 		if (i915.enable_execlists) {
3316 			u32 ptr, read, write;
3317 			unsigned int idx;
3318 
3319 			seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
3320 				   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
3321 				   I915_READ(RING_EXECLIST_STATUS_HI(engine)));
3322 
3323 			ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
3324 			read = GEN8_CSB_READ_PTR(ptr);
3325 			write = GEN8_CSB_WRITE_PTR(ptr);
3326 			seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n",
3327 				   read, write,
3328 				   yesno(test_bit(ENGINE_IRQ_EXECLIST,
3329 						  &engine->irq_posted)));
3330 			if (read >= GEN8_CSB_ENTRIES)
3331 				read = 0;
3332 			if (write >= GEN8_CSB_ENTRIES)
3333 				write = 0;
3334 			if (read > write)
3335 				write += GEN8_CSB_ENTRIES;
3336 			while (read < write) {
3337 				idx = ++read % GEN8_CSB_ENTRIES;
3338 				seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
3339 					   idx,
3340 					   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
3341 					   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
3342 			}
3343 
3344 			rcu_read_lock();
3345 			for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
3346 				unsigned int count;
3347 
3348 				rq = port_unpack(&engine->execlist_port[idx],
3349 						 &count);
3350 				if (rq) {
3351 					seq_printf(m, "\t\tELSP[%d] count=%d, ",
3352 						   idx, count);
3353 					print_request(m, rq, "rq: ");
3354 				} else {
3355 					seq_printf(m, "\t\tELSP[%d] idle\n",
3356 						   idx);
3357 				}
3358 			}
3359 			rcu_read_unlock();
3360 
3361 			spin_lock_irq(&engine->timeline->lock);
			for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
3363 				struct i915_priolist *p =
3364 					rb_entry(rb, typeof(*p), node);
3365 
3366 				list_for_each_entry(rq, &p->requests,
3367 						    priotree.link)
3368 					print_request(m, rq, "\t\tQ ");
3369 			}
3370 			spin_unlock_irq(&engine->timeline->lock);
3371 		} else if (INTEL_GEN(dev_priv) > 6) {
3372 			seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
3373 				   I915_READ(RING_PP_DIR_BASE(engine)));
3374 			seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
3375 				   I915_READ(RING_PP_DIR_BASE_READ(engine)));
3376 			seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
3377 				   I915_READ(RING_PP_DIR_DCLV(engine)));
3378 		}
3379 
3380 		spin_lock_irq(&b->rb_lock);
3381 		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
3382 			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
3383 
3384 			seq_printf(m, "\t%s [%d] waiting for %x\n",
3385 				   w->tsk->comm, w->tsk->pid, w->seqno);
3386 		}
3387 		spin_unlock_irq(&b->rb_lock);
3388 
3389 		seq_puts(m, "\n");
3390 	}
3391 
3392 	intel_runtime_pm_put(dev_priv);
3393 
3394 	return 0;
3395 }
3396 
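/*
 * On Broadwell the inter-engine semaphore seqnos live in a single GEM object
 * laid out as a matrix of u64 slots: slot [id * I915_NUM_ENGINES + j] holds
 * the last seqno engine 'id' signalled to engine 'j', and the transposed
 * index holds the last value it waited upon.  Older platforms keep the
 * equivalent state in per-engine mailbox registers instead.
 */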
3397 static int i915_semaphore_status(struct seq_file *m, void *unused)
3398 {
3399 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3400 	struct drm_device *dev = &dev_priv->drm;
3401 	struct intel_engine_cs *engine;
3402 	int num_rings = INTEL_INFO(dev_priv)->num_rings;
3403 	enum intel_engine_id id;
3404 	int j, ret;
3405 
3406 	if (!i915.semaphores) {
3407 		seq_puts(m, "Semaphores are disabled\n");
3408 		return 0;
3409 	}
3410 
3411 	ret = mutex_lock_interruptible(&dev->struct_mutex);
3412 	if (ret)
3413 		return ret;
3414 	intel_runtime_pm_get(dev_priv);
3415 
3416 	if (IS_BROADWELL(dev_priv)) {
3417 		struct page *page;
3418 		uint64_t *seqno;
3419 
3420 		page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
3421 
3422 		seqno = (uint64_t *)kmap_atomic(page);
3423 		for_each_engine(engine, dev_priv, id) {
3424 			uint64_t offset;
3425 
3426 			seq_printf(m, "%s\n", engine->name);
3427 
3428 			seq_puts(m, "  Last signal:");
3429 			for (j = 0; j < num_rings; j++) {
3430 				offset = id * I915_NUM_ENGINES + j;
3431 				seq_printf(m, "0x%08llx (0x%02llx) ",
3432 					   seqno[offset], offset * 8);
3433 			}
3434 			seq_putc(m, '\n');
3435 
3436 			seq_puts(m, "  Last wait:  ");
3437 			for (j = 0; j < num_rings; j++) {
3438 				offset = id + (j * I915_NUM_ENGINES);
3439 				seq_printf(m, "0x%08llx (0x%02llx) ",
3440 					   seqno[offset], offset * 8);
3441 			}
3442 			seq_putc(m, '\n');
3443 
3444 		}
3445 		kunmap_atomic(seqno);
3446 	} else {
3447 		seq_puts(m, "  Last signal:");
3448 		for_each_engine(engine, dev_priv, id)
3449 			for (j = 0; j < num_rings; j++)
3450 				seq_printf(m, "0x%08x\n",
3451 					   I915_READ(engine->semaphore.mbox.signal[j]));
3452 		seq_putc(m, '\n');
3453 	}
3454 
3455 	intel_runtime_pm_put(dev_priv);
3456 	mutex_unlock(&dev->struct_mutex);
3457 	return 0;
3458 }
3459 
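/*
 * For each shared DPLL, print the software tracking (crtc/active masks) next
 * to the tracked hardware state, so mismatches after a modeset stand out.
 */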
3460 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3461 {
3462 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3463 	struct drm_device *dev = &dev_priv->drm;
3464 	int i;
3465 
3466 	drm_modeset_lock_all(dev);
3467 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3468 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3469 
3470 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
3471 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3472 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3473 		seq_puts(m, " tracked hardware state:\n");
3474 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3475 		seq_printf(m, " dpll_md: 0x%08x\n",
3476 			   pll->state.hw_state.dpll_md);
3477 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3478 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3479 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3480 	}
3481 	drm_modeset_unlock_all(dev);
3482 
3483 	return 0;
3484 }
3485 
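/*
 * Re-read every register on the workaround list and verify that the masked
 * value still matches what was programmed; a FAIL here usually means a
 * workaround was lost across a reset or power transition.
 */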
3486 static int i915_wa_registers(struct seq_file *m, void *unused)
3487 {
3488 	int i;
3489 	int ret;
3490 	struct intel_engine_cs *engine;
3491 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3492 	struct drm_device *dev = &dev_priv->drm;
3493 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
3494 	enum intel_engine_id id;
3495 
3496 	ret = mutex_lock_interruptible(&dev->struct_mutex);
3497 	if (ret)
3498 		return ret;
3499 
3500 	intel_runtime_pm_get(dev_priv);
3501 
3502 	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3503 	for_each_engine(engine, dev_priv, id)
3504 		seq_printf(m, "HW whitelist count for %s: %d\n",
3505 			   engine->name, workarounds->hw_whitelist_count[id]);
3506 	for (i = 0; i < workarounds->count; ++i) {
3507 		i915_reg_t addr;
3508 		u32 mask, value, read;
3509 		bool ok;
3510 
3511 		addr = workarounds->reg[i].addr;
3512 		mask = workarounds->reg[i].mask;
3513 		value = workarounds->reg[i].value;
3514 		read = I915_READ(addr);
3515 		ok = (value & mask) == (read & mask);
3516 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3517 			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3518 	}
3519 
3520 	intel_runtime_pm_put(dev_priv);
3521 	mutex_unlock(&dev->struct_mutex);
3522 
3523 	return 0;
3524 }
3525 
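/*
 * Gen9+ display data buffer (DDB) allocation: the start/end block and size
 * handed to each universal plane, plus the cursor, per pipe.
 */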
3526 static int i915_ddb_info(struct seq_file *m, void *unused)
3527 {
3528 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3529 	struct drm_device *dev = &dev_priv->drm;
3530 	struct skl_ddb_allocation *ddb;
3531 	struct skl_ddb_entry *entry;
3532 	enum pipe pipe;
3533 	int plane;
3534 
3535 	if (INTEL_GEN(dev_priv) < 9)
3536 		return 0;
3537 
3538 	drm_modeset_lock_all(dev);
3539 
3540 	ddb = &dev_priv->wm.skl_hw.ddb;
3541 
3542 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3543 
3544 	for_each_pipe(dev_priv, pipe) {
3545 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3546 
3547 		for_each_universal_plane(dev_priv, pipe, plane) {
3548 			entry = &ddb->plane[pipe][plane];
3549 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3550 				   entry->start, entry->end,
3551 				   skl_ddb_entry_size(entry));
3552 		}
3553 
3554 		entry = &ddb->plane[pipe][PLANE_CURSOR];
3555 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3556 			   entry->end, skl_ddb_entry_size(entry));
3557 	}
3558 
3559 	drm_modeset_unlock_all(dev);
3560 
3561 	return 0;
3562 }
3563 
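/*
 * Per-CRTC DRRS (dynamic refresh rate switching) state: the VBT capability,
 * whether idleness DRRS is currently armed, and which refresh rate (high or
 * downclocked) the panel is running at.
 */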
3564 static void drrs_status_per_crtc(struct seq_file *m,
3565 				 struct drm_device *dev,
3566 				 struct intel_crtc *intel_crtc)
3567 {
3568 	struct drm_i915_private *dev_priv = to_i915(dev);
3569 	struct i915_drrs *drrs = &dev_priv->drrs;
3570 	int vrefresh = 0;
3571 	struct drm_connector *connector;
3572 	struct drm_connector_list_iter conn_iter;
3573 
3574 	drm_connector_list_iter_begin(dev, &conn_iter);
3575 	drm_for_each_connector_iter(connector, &conn_iter) {
3576 		if (connector->state->crtc != &intel_crtc->base)
3577 			continue;
3578 
3579 		seq_printf(m, "%s:\n", connector->name);
3580 	}
3581 	drm_connector_list_iter_end(&conn_iter);
3582 
3583 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3584 		seq_puts(m, "\tVBT: DRRS_type: Static");
3585 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3586 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3587 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3588 		seq_puts(m, "\tVBT: DRRS_type: None");
3589 	else
3590 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3591 
3592 	seq_puts(m, "\n\n");
3593 
3594 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3595 		struct intel_panel *panel;
3596 
3597 		mutex_lock(&drrs->mutex);
3598 		/* DRRS Supported */
3599 		seq_puts(m, "\tDRRS Supported: Yes\n");
3600 
3601 		/* disable_drrs() will make drrs->dp NULL */
3602 		if (!drrs->dp) {
3603 			seq_puts(m, "Idleness DRRS: Disabled");
3604 			mutex_unlock(&drrs->mutex);
3605 			return;
3606 		}
3607 
3608 		panel = &drrs->dp->attached_connector->panel;
3609 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3610 					drrs->busy_frontbuffer_bits);
3611 
3612 		seq_puts(m, "\n\t\t");
3613 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3614 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3615 			vrefresh = panel->fixed_mode->vrefresh;
3616 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3617 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3618 			vrefresh = panel->downclock_mode->vrefresh;
3619 		} else {
3620 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3621 						drrs->refresh_rate_type);
3622 			mutex_unlock(&drrs->mutex);
3623 			return;
3624 		}
3625 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3626 
3627 		seq_puts(m, "\n\t\t");
3628 		mutex_unlock(&drrs->mutex);
3629 	} else {
3630 		/* DRRS not supported. Print the VBT parameter. */
3631 		seq_puts(m, "\tDRRS Supported: No");
3632 	}
3633 	seq_puts(m, "\n");
3634 }
3635 
3636 static int i915_drrs_status(struct seq_file *m, void *unused)
3637 {
3638 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3639 	struct drm_device *dev = &dev_priv->drm;
3640 	struct intel_crtc *intel_crtc;
3641 	int active_crtc_cnt = 0;
3642 
3643 	drm_modeset_lock_all(dev);
3644 	for_each_intel_crtc(dev, intel_crtc) {
3645 		if (intel_crtc->base.state->active) {
3646 			active_crtc_cnt++;
3647 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3648 
3649 			drrs_status_per_crtc(m, dev, intel_crtc);
3650 		}
3651 	}
3652 	drm_modeset_unlock_all(dev);
3653 
3654 	if (!active_crtc_cnt)
3655 		seq_puts(m, "No active crtc found\n");
3656 
3657 	return 0;
3658 }
3659 
3660 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3661 {
3662 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3663 	struct drm_device *dev = &dev_priv->drm;
3664 	struct intel_encoder *intel_encoder;
3665 	struct intel_digital_port *intel_dig_port;
3666 	struct drm_connector *connector;
3667 	struct drm_connector_list_iter conn_iter;
3668 
3669 	drm_connector_list_iter_begin(dev, &conn_iter);
3670 	drm_for_each_connector_iter(connector, &conn_iter) {
3671 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3672 			continue;
3673 
3674 		intel_encoder = intel_attached_encoder(connector);
3675 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3676 			continue;
3677 
3678 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3679 		if (!intel_dig_port->dp.can_mst)
3680 			continue;
3681 
3682 		seq_printf(m, "MST Source Port %c\n",
3683 			   port_name(intel_dig_port->port));
3684 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3685 	}
3686 	drm_connector_list_iter_end(&conn_iter);
3687 
3688 	return 0;
3689 }
3690 
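/*
 * DP compliance test hooks.  A test harness arms the compliance code on a
 * connected DP connector by writing "1" to i915_dp_test_active (any other
 * value disarms it), then reads the requested test type and data back from
 * i915_dp_test_type and i915_dp_test_data.  For example, with debugfs
 * mounted in the usual place and card 0:
 *
 *	echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 */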
3691 static ssize_t i915_displayport_test_active_write(struct file *file,
3692 						  const char __user *ubuf,
3693 						  size_t len, loff_t *offp)
3694 {
3695 	char *input_buffer;
3696 	int status = 0;
3697 	struct drm_device *dev;
3698 	struct drm_connector *connector;
3699 	struct drm_connector_list_iter conn_iter;
3700 	struct intel_dp *intel_dp;
3701 	int val = 0;
3702 
3703 	dev = ((struct seq_file *)file->private_data)->private;
3704 
3705 	if (len == 0)
3706 		return 0;
3707 
3708 	input_buffer = memdup_user_nul(ubuf, len);
3709 	if (IS_ERR(input_buffer))
3710 		return PTR_ERR(input_buffer);
3711 
3712 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3713 
3714 	drm_connector_list_iter_begin(dev, &conn_iter);
3715 	drm_for_each_connector_iter(connector, &conn_iter) {
3716 		struct intel_encoder *encoder;
3717 
3718 		if (connector->connector_type !=
3719 		    DRM_MODE_CONNECTOR_DisplayPort)
3720 			continue;
3721 
3722 		encoder = to_intel_encoder(connector->encoder);
3723 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3724 			continue;
3725 
3726 		if (encoder && connector->status == connector_status_connected) {
3727 			intel_dp = enc_to_intel_dp(&encoder->base);
3728 			status = kstrtoint(input_buffer, 10, &val);
3729 			if (status < 0)
3730 				break;
3731 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3732 			/* To prevent erroneous activation of the compliance
3733 			 * testing code, only accept an actual value of 1 here
3734 			 */
3735 			if (val == 1)
3736 				intel_dp->compliance.test_active = 1;
3737 			else
3738 				intel_dp->compliance.test_active = 0;
3739 		}
3740 	}
3741 	drm_connector_list_iter_end(&conn_iter);
3742 	kfree(input_buffer);
3743 	if (status < 0)
3744 		return status;
3745 
3746 	*offp += len;
3747 	return len;
3748 }
3749 
3750 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3751 {
3752 	struct drm_device *dev = m->private;
3753 	struct drm_connector *connector;
3754 	struct drm_connector_list_iter conn_iter;
3755 	struct intel_dp *intel_dp;
3756 
3757 	drm_connector_list_iter_begin(dev, &conn_iter);
3758 	drm_for_each_connector_iter(connector, &conn_iter) {
3759 		struct intel_encoder *encoder;
3760 
3761 		if (connector->connector_type !=
3762 		    DRM_MODE_CONNECTOR_DisplayPort)
3763 			continue;
3764 
3765 		encoder = to_intel_encoder(connector->encoder);
3766 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3767 			continue;
3768 
3769 		if (encoder && connector->status == connector_status_connected) {
3770 			intel_dp = enc_to_intel_dp(&encoder->base);
3771 			if (intel_dp->compliance.test_active)
3772 				seq_puts(m, "1");
3773 			else
3774 				seq_puts(m, "0");
3775 		} else
3776 			seq_puts(m, "0");
3777 	}
3778 	drm_connector_list_iter_end(&conn_iter);
3779 
3780 	return 0;
3781 }
3782 
3783 static int i915_displayport_test_active_open(struct inode *inode,
3784 					     struct file *file)
3785 {
3786 	struct drm_i915_private *dev_priv = inode->i_private;
3787 
3788 	return single_open(file, i915_displayport_test_active_show,
3789 			   &dev_priv->drm);
3790 }
3791 
3792 static const struct file_operations i915_displayport_test_active_fops = {
3793 	.owner = THIS_MODULE,
3794 	.open = i915_displayport_test_active_open,
3795 	.read = seq_read,
3796 	.llseek = seq_lseek,
3797 	.release = single_release,
3798 	.write = i915_displayport_test_active_write
3799 };
3800 
3801 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3802 {
3803 	struct drm_device *dev = m->private;
3804 	struct drm_connector *connector;
3805 	struct drm_connector_list_iter conn_iter;
3806 	struct intel_dp *intel_dp;
3807 
3808 	drm_connector_list_iter_begin(dev, &conn_iter);
3809 	drm_for_each_connector_iter(connector, &conn_iter) {
3810 		struct intel_encoder *encoder;
3811 
3812 		if (connector->connector_type !=
3813 		    DRM_MODE_CONNECTOR_DisplayPort)
3814 			continue;
3815 
3816 		encoder = to_intel_encoder(connector->encoder);
3817 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3818 			continue;
3819 
3820 		if (encoder && connector->status == connector_status_connected) {
3821 			intel_dp = enc_to_intel_dp(&encoder->base);
3822 			if (intel_dp->compliance.test_type ==
3823 			    DP_TEST_LINK_EDID_READ)
3824 				seq_printf(m, "%lx",
3825 					   intel_dp->compliance.test_data.edid);
3826 			else if (intel_dp->compliance.test_type ==
3827 				 DP_TEST_LINK_VIDEO_PATTERN) {
3828 				seq_printf(m, "hdisplay: %d\n",
3829 					   intel_dp->compliance.test_data.hdisplay);
3830 				seq_printf(m, "vdisplay: %d\n",
3831 					   intel_dp->compliance.test_data.vdisplay);
3832 				seq_printf(m, "bpc: %u\n",
3833 					   intel_dp->compliance.test_data.bpc);
3834 			}
3835 		} else
3836 			seq_puts(m, "0");
3837 	}
3838 	drm_connector_list_iter_end(&conn_iter);
3839 
3840 	return 0;
3841 }
3842 static int i915_displayport_test_data_open(struct inode *inode,
3843 					   struct file *file)
3844 {
3845 	struct drm_i915_private *dev_priv = inode->i_private;
3846 
3847 	return single_open(file, i915_displayport_test_data_show,
3848 			   &dev_priv->drm);
3849 }
3850 
3851 static const struct file_operations i915_displayport_test_data_fops = {
3852 	.owner = THIS_MODULE,
3853 	.open = i915_displayport_test_data_open,
3854 	.read = seq_read,
3855 	.llseek = seq_lseek,
3856 	.release = single_release
3857 };
3858 
3859 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3860 {
3861 	struct drm_device *dev = m->private;
3862 	struct drm_connector *connector;
3863 	struct drm_connector_list_iter conn_iter;
3864 	struct intel_dp *intel_dp;
3865 
3866 	drm_connector_list_iter_begin(dev, &conn_iter);
3867 	drm_for_each_connector_iter(connector, &conn_iter) {
3868 		struct intel_encoder *encoder;
3869 
3870 		if (connector->connector_type !=
3871 		    DRM_MODE_CONNECTOR_DisplayPort)
3872 			continue;
3873 
3874 		encoder = to_intel_encoder(connector->encoder);
3875 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3876 			continue;
3877 
3878 		if (encoder && connector->status == connector_status_connected) {
3879 			intel_dp = enc_to_intel_dp(&encoder->base);
3880 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3881 		} else
3882 			seq_puts(m, "0");
3883 	}
3884 	drm_connector_list_iter_end(&conn_iter);
3885 
3886 	return 0;
3887 }
3888 
3889 static int i915_displayport_test_type_open(struct inode *inode,
3890 					   struct file *file)
3891 {
3892 	struct drm_i915_private *dev_priv = inode->i_private;
3893 
3894 	return single_open(file, i915_displayport_test_type_show,
3895 			   &dev_priv->drm);
3896 }
3897 
3898 static const struct file_operations i915_displayport_test_type_fops = {
3899 	.owner = THIS_MODULE,
3900 	.open = i915_displayport_test_type_open,
3901 	.read = seq_read,
3902 	.llseek = seq_lseek,
3903 	.release = single_release
3904 };
3905 
3906 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3907 {
3908 	struct drm_i915_private *dev_priv = m->private;
3909 	struct drm_device *dev = &dev_priv->drm;
3910 	int level;
3911 	int num_levels;
3912 
3913 	if (IS_CHERRYVIEW(dev_priv))
3914 		num_levels = 3;
3915 	else if (IS_VALLEYVIEW(dev_priv))
3916 		num_levels = 1;
3917 	else if (IS_G4X(dev_priv))
3918 		num_levels = 3;
3919 	else
3920 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3921 
3922 	drm_modeset_lock_all(dev);
3923 
3924 	for (level = 0; level < num_levels; level++) {
3925 		unsigned int latency = wm[level];
3926 
3927 		/*
3928 		 * - WM1+ latency values in 0.5us units
3929 		 * - latencies are in us on gen9/vlv/chv/g4x
3930 		 */
3931 		if (INTEL_GEN(dev_priv) >= 9 ||
3932 		    IS_VALLEYVIEW(dev_priv) ||
3933 		    IS_CHERRYVIEW(dev_priv) ||
3934 		    IS_G4X(dev_priv))
3935 			latency *= 10;
3936 		else if (level > 0)
3937 			latency *= 5;
3938 
3939 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3940 			   level, wm[level], latency / 10, latency % 10);
3941 	}
3942 
3943 	drm_modeset_unlock_all(dev);
3944 }
3945 
3946 static int pri_wm_latency_show(struct seq_file *m, void *data)
3947 {
3948 	struct drm_i915_private *dev_priv = m->private;
3949 	const uint16_t *latencies;
3950 
3951 	if (INTEL_GEN(dev_priv) >= 9)
3952 		latencies = dev_priv->wm.skl_latency;
3953 	else
3954 		latencies = dev_priv->wm.pri_latency;
3955 
3956 	wm_latency_show(m, latencies);
3957 
3958 	return 0;
3959 }
3960 
3961 static int spr_wm_latency_show(struct seq_file *m, void *data)
3962 {
3963 	struct drm_i915_private *dev_priv = m->private;
3964 	const uint16_t *latencies;
3965 
3966 	if (INTEL_GEN(dev_priv) >= 9)
3967 		latencies = dev_priv->wm.skl_latency;
3968 	else
3969 		latencies = dev_priv->wm.spr_latency;
3970 
3971 	wm_latency_show(m, latencies);
3972 
3973 	return 0;
3974 }
3975 
3976 static int cur_wm_latency_show(struct seq_file *m, void *data)
3977 {
3978 	struct drm_i915_private *dev_priv = m->private;
3979 	const uint16_t *latencies;
3980 
3981 	if (INTEL_GEN(dev_priv) >= 9)
3982 		latencies = dev_priv->wm.skl_latency;
3983 	else
3984 		latencies = dev_priv->wm.cur_latency;
3985 
3986 	wm_latency_show(m, latencies);
3987 
3988 	return 0;
3989 }
3990 
3991 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3992 {
3993 	struct drm_i915_private *dev_priv = inode->i_private;
3994 
3995 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3996 		return -ENODEV;
3997 
3998 	return single_open(file, pri_wm_latency_show, dev_priv);
3999 }
4000 
4001 static int spr_wm_latency_open(struct inode *inode, struct file *file)
4002 {
4003 	struct drm_i915_private *dev_priv = inode->i_private;
4004 
4005 	if (HAS_GMCH_DISPLAY(dev_priv))
4006 		return -ENODEV;
4007 
4008 	return single_open(file, spr_wm_latency_show, dev_priv);
4009 }
4010 
4011 static int cur_wm_latency_open(struct inode *inode, struct file *file)
4012 {
4013 	struct drm_i915_private *dev_priv = inode->i_private;
4014 
4015 	if (HAS_GMCH_DISPLAY(dev_priv))
4016 		return -ENODEV;
4017 
4018 	return single_open(file, cur_wm_latency_show, dev_priv);
4019 }
4020 
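/*
 * Overwrite a watermark latency table from userspace.  The write must supply
 * exactly one decimal value per watermark level supported by the platform,
 * e.g. on a part with eight levels (debugfs path may differ):
 *
 *	echo "2 4 10 10 10 10 10 10" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */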
4021 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
4022 				size_t len, loff_t *offp, uint16_t wm[8])
4023 {
4024 	struct seq_file *m = file->private_data;
4025 	struct drm_i915_private *dev_priv = m->private;
4026 	struct drm_device *dev = &dev_priv->drm;
4027 	uint16_t new[8] = { 0 };
4028 	int num_levels;
4029 	int level;
4030 	int ret;
4031 	char tmp[32];
4032 
4033 	if (IS_CHERRYVIEW(dev_priv))
4034 		num_levels = 3;
4035 	else if (IS_VALLEYVIEW(dev_priv))
4036 		num_levels = 1;
4037 	else if (IS_G4X(dev_priv))
4038 		num_levels = 3;
4039 	else
4040 		num_levels = ilk_wm_max_level(dev_priv) + 1;
4041 
4042 	if (len >= sizeof(tmp))
4043 		return -EINVAL;
4044 
4045 	if (copy_from_user(tmp, ubuf, len))
4046 		return -EFAULT;
4047 
4048 	tmp[len] = '\0';
4049 
4050 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
4051 		     &new[0], &new[1], &new[2], &new[3],
4052 		     &new[4], &new[5], &new[6], &new[7]);
4053 	if (ret != num_levels)
4054 		return -EINVAL;
4055 
4056 	drm_modeset_lock_all(dev);
4057 
4058 	for (level = 0; level < num_levels; level++)
4059 		wm[level] = new[level];
4060 
4061 	drm_modeset_unlock_all(dev);
4062 
4063 	return len;
4064 }
4065 
4067 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4068 				    size_t len, loff_t *offp)
4069 {
4070 	struct seq_file *m = file->private_data;
4071 	struct drm_i915_private *dev_priv = m->private;
4072 	uint16_t *latencies;
4073 
4074 	if (INTEL_GEN(dev_priv) >= 9)
4075 		latencies = dev_priv->wm.skl_latency;
4076 	else
4077 		latencies = dev_priv->wm.pri_latency;
4078 
4079 	return wm_latency_write(file, ubuf, len, offp, latencies);
4080 }
4081 
4082 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4083 				    size_t len, loff_t *offp)
4084 {
4085 	struct seq_file *m = file->private_data;
4086 	struct drm_i915_private *dev_priv = m->private;
4087 	uint16_t *latencies;
4088 
4089 	if (INTEL_GEN(dev_priv) >= 9)
4090 		latencies = dev_priv->wm.skl_latency;
4091 	else
4092 		latencies = dev_priv->wm.spr_latency;
4093 
4094 	return wm_latency_write(file, ubuf, len, offp, latencies);
4095 }
4096 
4097 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4098 				    size_t len, loff_t *offp)
4099 {
4100 	struct seq_file *m = file->private_data;
4101 	struct drm_i915_private *dev_priv = m->private;
4102 	uint16_t *latencies;
4103 
4104 	if (INTEL_GEN(dev_priv) >= 9)
4105 		latencies = dev_priv->wm.skl_latency;
4106 	else
4107 		latencies = dev_priv->wm.cur_latency;
4108 
4109 	return wm_latency_write(file, ubuf, len, offp, latencies);
4110 }
4111 
4112 static const struct file_operations i915_pri_wm_latency_fops = {
4113 	.owner = THIS_MODULE,
4114 	.open = pri_wm_latency_open,
4115 	.read = seq_read,
4116 	.llseek = seq_lseek,
4117 	.release = single_release,
4118 	.write = pri_wm_latency_write
4119 };
4120 
4121 static const struct file_operations i915_spr_wm_latency_fops = {
4122 	.owner = THIS_MODULE,
4123 	.open = spr_wm_latency_open,
4124 	.read = seq_read,
4125 	.llseek = seq_lseek,
4126 	.release = single_release,
4127 	.write = spr_wm_latency_write
4128 };
4129 
4130 static const struct file_operations i915_cur_wm_latency_fops = {
4131 	.owner = THIS_MODULE,
4132 	.open = cur_wm_latency_open,
4133 	.read = seq_read,
4134 	.llseek = seq_lseek,
4135 	.release = single_release,
4136 	.write = cur_wm_latency_write
4137 };
4138 
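/*
 * i915_wedged reads back non-zero once the GPU is terminally wedged; writing
 * an engine mask declares those engines hung and feeds them straight into
 * i915_handle_error().  Writing all ones (echo -1 > i915_wedged) is the
 * conventional way for a test harness to force a full GPU reset.
 */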
4139 static int
4140 i915_wedged_get(void *data, u64 *val)
4141 {
4142 	struct drm_i915_private *dev_priv = data;
4143 
4144 	*val = i915_terminally_wedged(&dev_priv->gpu_error);
4145 
4146 	return 0;
4147 }
4148 
4149 static int
4150 i915_wedged_set(void *data, u64 val)
4151 {
4152 	struct drm_i915_private *i915 = data;
4153 	struct intel_engine_cs *engine;
4154 	unsigned int tmp;
4155 
4156 	/*
4157 	 * There is no safeguard against this debugfs entry colliding
4158 	 * with the hangcheck calling the same i915_handle_error() in
4159 	 * parallel, causing an explosion. For now we assume that the
4160 	 * test harness is responsible enough not to inject gpu hangs
4161 	 * while it is writing to 'i915_wedged'.
4162 	 */
4163 
4164 	if (i915_reset_backoff(&i915->gpu_error))
4165 		return -EAGAIN;
4166 
4167 	for_each_engine_masked(engine, i915, val, tmp) {
4168 		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4169 		engine->hangcheck.stalled = true;
4170 	}
4171 
4172 	i915_handle_error(i915, val, "Manually setting wedged to %llu", val);
4173 
4174 	wait_on_bit(&i915->gpu_error.flags,
4175 		    I915_RESET_HANDOFF,
4176 		    TASK_UNINTERRUPTIBLE);
4177 
4178 	return 0;
4179 }
4180 
4181 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4182 			i915_wedged_get, i915_wedged_set,
4183 			"%llu\n");
4184 
4185 static int
4186 fault_irq_set(struct drm_i915_private *i915,
4187 	      unsigned long *irq,
4188 	      unsigned long val)
4189 {
4190 	int err;
4191 
4192 	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4193 	if (err)
4194 		return err;
4195 
4196 	err = i915_gem_wait_for_idle(i915,
4197 				     I915_WAIT_LOCKED |
4198 				     I915_WAIT_INTERRUPTIBLE);
4199 	if (err)
4200 		goto err_unlock;
4201 
4202 	*irq = val;
4203 	mutex_unlock(&i915->drm.struct_mutex);
4204 
4205 	/* Flush idle worker to disarm irq */
4206 	while (flush_delayed_work(&i915->gt.idle_work))
4207 		;
4208 
4209 	return 0;
4210 
4211 err_unlock:
4212 	mutex_unlock(&i915->drm.struct_mutex);
4213 	return err;
4214 }
4215 
4216 static int
4217 i915_ring_missed_irq_get(void *data, u64 *val)
4218 {
4219 	struct drm_i915_private *dev_priv = data;
4220 
4221 	*val = dev_priv->gpu_error.missed_irq_rings;
4222 	return 0;
4223 }
4224 
4225 static int
4226 i915_ring_missed_irq_set(void *data, u64 val)
4227 {
4228 	struct drm_i915_private *i915 = data;
4229 
4230 	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4231 }
4232 
4233 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4234 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4235 			"0x%08llx\n");
4236 
4237 static int
4238 i915_ring_test_irq_get(void *data, u64 *val)
4239 {
4240 	struct drm_i915_private *dev_priv = data;
4241 
4242 	*val = dev_priv->gpu_error.test_irq_rings;
4243 
4244 	return 0;
4245 }
4246 
4247 static int
4248 i915_ring_test_irq_set(void *data, u64 val)
4249 {
4250 	struct drm_i915_private *i915 = data;
4251 
4252 	val &= INTEL_INFO(i915)->ring_mask;
4253 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4254 
4255 	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
4256 }
4257 
4258 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4259 			i915_ring_test_irq_get, i915_ring_test_irq_set,
4260 			"0x%08llx\n");
4261 
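/*
 * i915_gem_drop_caches takes a bitmask of the DROP_* flags below and is used
 * by tests to shove the driver into a known state; e.g. writing DROP_ALL
 * (echo 0x3f > i915_gem_drop_caches) idles the GPU, retires all requests and
 * releases every page the shrinker can reach.
 */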
4262 #define DROP_UNBOUND 0x1
4263 #define DROP_BOUND 0x2
4264 #define DROP_RETIRE 0x4
4265 #define DROP_ACTIVE 0x8
4266 #define DROP_FREED 0x10
4267 #define DROP_SHRINK_ALL 0x20
4268 #define DROP_ALL (DROP_UNBOUND	| \
4269 		  DROP_BOUND	| \
4270 		  DROP_RETIRE	| \
4271 		  DROP_ACTIVE	| \
4272 		  DROP_FREED	| \
4273 		  DROP_SHRINK_ALL)
4274 static int
4275 i915_drop_caches_get(void *data, u64 *val)
4276 {
4277 	*val = DROP_ALL;
4278 
4279 	return 0;
4280 }
4281 
4282 static int
4283 i915_drop_caches_set(void *data, u64 val)
4284 {
4285 	struct drm_i915_private *dev_priv = data;
4286 	struct drm_device *dev = &dev_priv->drm;
4287 	int ret = 0;
4288 
4289 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
4290 
4291 	/* No need to check and wait for gpu resets; libdrm simply
4292 	 * auto-restarts ioctls on -EAGAIN. */
4293 	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
4294 		ret = mutex_lock_interruptible(&dev->struct_mutex);
4295 		if (ret)
4296 			return ret;
4297 
4298 		if (val & DROP_ACTIVE)
4299 			ret = i915_gem_wait_for_idle(dev_priv,
4300 						     I915_WAIT_INTERRUPTIBLE |
4301 						     I915_WAIT_LOCKED);
4302 
4303 		if (val & DROP_RETIRE)
4304 			i915_gem_retire_requests(dev_priv);
4305 
4306 		mutex_unlock(&dev->struct_mutex);
4307 	}
4308 
4309 	fs_reclaim_acquire(GFP_KERNEL);
4310 	if (val & DROP_BOUND)
4311 		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
4312 
4313 	if (val & DROP_UNBOUND)
4314 		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
4315 
4316 	if (val & DROP_SHRINK_ALL)
4317 		i915_gem_shrink_all(dev_priv);
4318 	fs_reclaim_release(GFP_KERNEL);
4319 
4320 	if (val & DROP_FREED) {
4321 		synchronize_rcu();
4322 		i915_gem_drain_freed_objects(dev_priv);
4323 	}
4324 
4325 	return ret;
4326 }
4327 
4328 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4329 			i915_drop_caches_get, i915_drop_caches_set,
4330 			"0x%08llx\n");
4331 
4332 static int
4333 i915_max_freq_get(void *data, u64 *val)
4334 {
4335 	struct drm_i915_private *dev_priv = data;
4336 
4337 	if (INTEL_GEN(dev_priv) < 6)
4338 		return -ENODEV;
4339 
4340 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
4341 	return 0;
4342 }
4343 
4344 static int
4345 i915_max_freq_set(void *data, u64 val)
4346 {
4347 	struct drm_i915_private *dev_priv = data;
4348 	u32 hw_max, hw_min;
4349 	int ret;
4350 
4351 	if (INTEL_GEN(dev_priv) < 6)
4352 		return -ENODEV;
4353 
4354 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
4355 
4356 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4357 	if (ret)
4358 		return ret;
4359 
4360 	/*
4361 	 * Turbo will still be enabled, but won't go above the set value.
4362 	 */
4363 	val = intel_freq_opcode(dev_priv, val);
4364 
4365 	hw_max = dev_priv->rps.max_freq;
4366 	hw_min = dev_priv->rps.min_freq;
4367 
4368 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
4369 		mutex_unlock(&dev_priv->rps.hw_lock);
4370 		return -EINVAL;
4371 	}
4372 
4373 	dev_priv->rps.max_freq_softlimit = val;
4374 
4375 	if (intel_set_rps(dev_priv, val))
4376 		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
4377 
4378 	mutex_unlock(&dev_priv->rps.hw_lock);
4379 
4380 	return 0;
4381 }
4382 
4383 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4384 			i915_max_freq_get, i915_max_freq_set,
4385 			"%llu\n");
4386 
4387 static int
4388 i915_min_freq_get(void *data, u64 *val)
4389 {
4390 	struct drm_i915_private *dev_priv = data;
4391 
4392 	if (INTEL_GEN(dev_priv) < 6)
4393 		return -ENODEV;
4394 
4395 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
4396 	return 0;
4397 }
4398 
4399 static int
4400 i915_min_freq_set(void *data, u64 val)
4401 {
4402 	struct drm_i915_private *dev_priv = data;
4403 	u32 hw_max, hw_min;
4404 	int ret;
4405 
4406 	if (INTEL_GEN(dev_priv) < 6)
4407 		return -ENODEV;
4408 
4409 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
4410 
4411 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4412 	if (ret)
4413 		return ret;
4414 
4415 	/*
4416 	 * Turbo will still be enabled, but won't go below the set value.
4417 	 */
4418 	val = intel_freq_opcode(dev_priv, val);
4419 
4420 	hw_max = dev_priv->rps.max_freq;
4421 	hw_min = dev_priv->rps.min_freq;
4422 
4423 	if (val < hw_min ||
4424 	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
4425 		mutex_unlock(&dev_priv->rps.hw_lock);
4426 		return -EINVAL;
4427 	}
4428 
4429 	dev_priv->rps.min_freq_softlimit = val;
4430 
4431 	if (intel_set_rps(dev_priv, val))
4432 		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
4433 
4434 	mutex_unlock(&dev_priv->rps.hw_lock);
4435 
4436 	return 0;
4437 }
4438 
4439 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
4440 			i915_min_freq_get, i915_min_freq_set,
4441 			"%llu\n");
4442 
4443 static int
4444 i915_cache_sharing_get(void *data, u64 *val)
4445 {
4446 	struct drm_i915_private *dev_priv = data;
4447 	u32 snpcr;
4448 
4449 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4450 		return -ENODEV;
4451 
4452 	intel_runtime_pm_get(dev_priv);
4453 
4454 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4455 
4456 	intel_runtime_pm_put(dev_priv);
4457 
4458 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4459 
4460 	return 0;
4461 }
4462 
4463 static int
4464 i915_cache_sharing_set(void *data, u64 val)
4465 {
4466 	struct drm_i915_private *dev_priv = data;
4467 	u32 snpcr;
4468 
4469 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4470 		return -ENODEV;
4471 
4472 	if (val > 3)
4473 		return -EINVAL;
4474 
4475 	intel_runtime_pm_get(dev_priv);
4476 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4477 
4478 	/* Update the cache sharing policy here as well */
4479 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4480 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
4481 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4482 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4483 
4484 	intel_runtime_pm_put(dev_priv);
4485 	return 0;
4486 }
4487 
4488 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4489 			i915_cache_sharing_get, i915_cache_sharing_set,
4490 			"%llu\n");
4491 
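/*
 * SSEU status probes: each platform decodes its power-gating acknowledge
 * registers into a common sseu_dev_info, so the slice/subslice/EU topology
 * currently powered up can be compared against the device's capabilities.
 */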
4492 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4493 					  struct sseu_dev_info *sseu)
4494 {
4495 	int ss_max = 2;
4496 	int ss;
4497 	u32 sig1[ss_max], sig2[ss_max];
4498 
4499 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4500 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4501 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4502 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4503 
4504 	for (ss = 0; ss < ss_max; ss++) {
4505 		unsigned int eu_cnt;
4506 
4507 		if (sig1[ss] & CHV_SS_PG_ENABLE)
4508 			/* skip disabled subslice */
4509 			continue;
4510 
4511 		sseu->slice_mask = BIT(0);
4512 		sseu->subslice_mask |= BIT(ss);
4513 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4514 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4515 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4516 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4517 		sseu->eu_total += eu_cnt;
4518 		sseu->eu_per_subslice = max_t(unsigned int,
4519 					      sseu->eu_per_subslice, eu_cnt);
4520 	}
4521 }
4522 
4523 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4524 				    struct sseu_dev_info *sseu)
4525 {
4526 	int s_max = 3, ss_max = 4;
4527 	int s, ss;
4528 	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
4529 
4530 	/* BXT has a single slice and at most 3 subslices. */
4531 	if (IS_GEN9_LP(dev_priv)) {
4532 		s_max = 1;
4533 		ss_max = 3;
4534 	}
4535 
4536 	for (s = 0; s < s_max; s++) {
4537 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4538 		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4539 		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4540 	}
4541 
4542 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4543 		     GEN9_PGCTL_SSA_EU19_ACK |
4544 		     GEN9_PGCTL_SSA_EU210_ACK |
4545 		     GEN9_PGCTL_SSA_EU311_ACK;
4546 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4547 		     GEN9_PGCTL_SSB_EU19_ACK |
4548 		     GEN9_PGCTL_SSB_EU210_ACK |
4549 		     GEN9_PGCTL_SSB_EU311_ACK;
4550 
4551 	for (s = 0; s < s_max; s++) {
4552 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4553 			/* skip disabled slice */
4554 			continue;
4555 
4556 		sseu->slice_mask |= BIT(s);
4557 
4558 		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
4559 			sseu->subslice_mask =
4560 				INTEL_INFO(dev_priv)->sseu.subslice_mask;
4561 
4562 		for (ss = 0; ss < ss_max; ss++) {
4563 			unsigned int eu_cnt;
4564 
4565 			if (IS_GEN9_LP(dev_priv)) {
4566 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4567 					/* skip disabled subslice */
4568 					continue;
4569 
4570 				sseu->subslice_mask |= BIT(ss);
4571 			}
4572 
4573 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4574 					       eu_mask[ss%2]);
4575 			sseu->eu_total += eu_cnt;
4576 			sseu->eu_per_subslice = max_t(unsigned int,
4577 						      sseu->eu_per_subslice,
4578 						      eu_cnt);
4579 		}
4580 	}
4581 }
4582 
4583 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4584 					 struct sseu_dev_info *sseu)
4585 {
4586 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4587 	int s;
4588 
4589 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4590 
4591 	if (sseu->slice_mask) {
4592 		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
4593 		sseu->eu_per_subslice =
4594 				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
4595 		sseu->eu_total = sseu->eu_per_subslice *
4596 				 sseu_subslice_total(sseu);
4597 
4598 		/* subtract fused off EU(s) from enabled slice(s) */
4599 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4600 			u8 subslice_7eu =
4601 				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
4602 
4603 			sseu->eu_total -= hweight8(subslice_7eu);
4604 		}
4605 	}
4606 }
4607 
4608 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4609 				 const struct sseu_dev_info *sseu)
4610 {
4611 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4612 	const char *type = is_available_info ? "Available" : "Enabled";
4613 
4614 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
4615 		   sseu->slice_mask);
4616 	seq_printf(m, "  %s Slice Total: %u\n", type,
4617 		   hweight8(sseu->slice_mask));
4618 	seq_printf(m, "  %s Subslice Total: %u\n", type,
4619 		   sseu_subslice_total(sseu));
4620 	seq_printf(m, "  %s Subslice Mask: %04x\n", type,
4621 		   sseu->subslice_mask);
4622 	seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
4623 		   hweight8(sseu->subslice_mask));
4624 	seq_printf(m, "  %s EU Total: %u\n", type,
4625 		   sseu->eu_total);
4626 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4627 		   sseu->eu_per_subslice);
4628 
4629 	if (!is_available_info)
4630 		return;
4631 
4632 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4633 	if (HAS_POOLED_EU(dev_priv))
4634 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4635 
4636 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4637 		   yesno(sseu->has_slice_pg));
4638 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4639 		   yesno(sseu->has_subslice_pg));
4640 	seq_printf(m, "  Has EU Power Gating: %s\n",
4641 		   yesno(sseu->has_eu_pg));
4642 }
4643 
4644 static int i915_sseu_status(struct seq_file *m, void *unused)
4645 {
4646 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4647 	struct sseu_dev_info sseu;
4648 
4649 	if (INTEL_GEN(dev_priv) < 8)
4650 		return -ENODEV;
4651 
4652 	seq_puts(m, "SSEU Device Info\n");
4653 	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
4654 
4655 	seq_puts(m, "SSEU Device Status\n");
4656 	memset(&sseu, 0, sizeof(sseu));
4657 
4658 	intel_runtime_pm_get(dev_priv);
4659 
4660 	if (IS_CHERRYVIEW(dev_priv)) {
4661 		cherryview_sseu_device_status(dev_priv, &sseu);
4662 	} else if (IS_BROADWELL(dev_priv)) {
4663 		broadwell_sseu_device_status(dev_priv, &sseu);
4664 	} else if (INTEL_GEN(dev_priv) >= 9) {
4665 		gen9_sseu_device_status(dev_priv, &sseu);
4666 	}
4667 
4668 	intel_runtime_pm_put(dev_priv);
4669 
4670 	i915_print_sseu_info(m, false, &sseu);
4671 
4672 	return 0;
4673 }
4674 
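/*
 * Holding i915_forcewake_user open grabs a runtime-pm reference and all
 * forcewake domains, keeping the GT awake (and its registers readable) until
 * the file is closed again.
 */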
4675 static int i915_forcewake_open(struct inode *inode, struct file *file)
4676 {
4677 	struct drm_i915_private *dev_priv = inode->i_private;
4678 
4679 	if (INTEL_GEN(dev_priv) < 6)
4680 		return 0;
4681 
4682 	intel_runtime_pm_get(dev_priv);
4683 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4684 
4685 	return 0;
4686 }
4687 
4688 static int i915_forcewake_release(struct inode *inode, struct file *file)
4689 {
4690 	struct drm_i915_private *dev_priv = inode->i_private;
4691 
4692 	if (INTEL_GEN(dev_priv) < 6)
4693 		return 0;
4694 
4695 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4696 	intel_runtime_pm_put(dev_priv);
4697 
4698 	return 0;
4699 }
4700 
4701 static const struct file_operations i915_forcewake_fops = {
4702 	.owner = THIS_MODULE,
4703 	.open = i915_forcewake_open,
4704 	.release = i915_forcewake_release,
4705 };
4706 
4707 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4708 {
4709 	struct drm_i915_private *dev_priv = m->private;
4710 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4711 
4712 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4713 	seq_printf(m, "Detected: %s\n",
4714 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4715 
4716 	return 0;
4717 }
4718 
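/*
 * Accepts a decimal storm threshold, or "reset" to restore the default;
 * writing 0 disables HPD storm detection altogether.  For example:
 *
 *	echo 5 > i915_hpd_storm_ctl
 */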
4719 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4720 					const char __user *ubuf, size_t len,
4721 					loff_t *offp)
4722 {
4723 	struct seq_file *m = file->private_data;
4724 	struct drm_i915_private *dev_priv = m->private;
4725 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4726 	unsigned int new_threshold;
4727 	int i;
4728 	char *newline;
4729 	char tmp[16];
4730 
4731 	if (len >= sizeof(tmp))
4732 		return -EINVAL;
4733 
4734 	if (copy_from_user(tmp, ubuf, len))
4735 		return -EFAULT;
4736 
4737 	tmp[len] = '\0';
4738 
4739 	/* Strip newline, if any */
4740 	newline = strchr(tmp, '\n');
4741 	if (newline)
4742 		*newline = '\0';
4743 
4744 	if (strcmp(tmp, "reset") == 0)
4745 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4746 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4747 		return -EINVAL;
4748 
4749 	if (new_threshold > 0)
4750 		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4751 			      new_threshold);
4752 	else
4753 		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4754 
4755 	spin_lock_irq(&dev_priv->irq_lock);
4756 	hotplug->hpd_storm_threshold = new_threshold;
4757 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4758 	for_each_hpd_pin(i)
4759 		hotplug->stats[i].count = 0;
4760 	spin_unlock_irq(&dev_priv->irq_lock);
4761 
4762 	/* Re-enable hpd immediately if we were in an irq storm */
4763 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4764 
4765 	return len;
4766 }
4767 
4768 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4769 {
4770 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4771 }
4772 
4773 static const struct file_operations i915_hpd_storm_ctl_fops = {
4774 	.owner = THIS_MODULE,
4775 	.open = i915_hpd_storm_ctl_open,
4776 	.read = seq_read,
4777 	.llseek = seq_lseek,
4778 	.release = single_release,
4779 	.write = i915_hpd_storm_ctl_write
4780 };
4781 
4782 static const struct drm_info_list i915_debugfs_list[] = {
4783 	{"i915_capabilities", i915_capabilities, 0},
4784 	{"i915_gem_objects", i915_gem_object_info, 0},
4785 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4786 	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
4787 	{"i915_gem_stolen", i915_gem_stolen_list_info },
4788 	{"i915_gem_request", i915_gem_request_info, 0},
4789 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
4790 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4791 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4792 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4793 	{"i915_guc_info", i915_guc_info, 0},
4794 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4795 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4796 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4797 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4798 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4799 	{"i915_frequency_info", i915_frequency_info, 0},
4800 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4801 	{"i915_reset_info", i915_reset_info, 0},
4802 	{"i915_drpc_info", i915_drpc_info, 0},
4803 	{"i915_emon_status", i915_emon_status, 0},
4804 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4805 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4806 	{"i915_fbc_status", i915_fbc_status, 0},
4807 	{"i915_ips_status", i915_ips_status, 0},
4808 	{"i915_sr_status", i915_sr_status, 0},
4809 	{"i915_opregion", i915_opregion, 0},
4810 	{"i915_vbt", i915_vbt, 0},
4811 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4812 	{"i915_context_status", i915_context_status, 0},
4813 	{"i915_dump_lrc", i915_dump_lrc, 0},
4814 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4815 	{"i915_swizzle_info", i915_swizzle_info, 0},
4816 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
4817 	{"i915_llc", i915_llc, 0},
4818 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4819 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
4820 	{"i915_energy_uJ", i915_energy_uJ, 0},
4821 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4822 	{"i915_power_domain_info", i915_power_domain_info, 0},
4823 	{"i915_dmc_info", i915_dmc_info, 0},
4824 	{"i915_display_info", i915_display_info, 0},
4825 	{"i915_engine_info", i915_engine_info, 0},
4826 	{"i915_semaphore_status", i915_semaphore_status, 0},
4827 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4828 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4829 	{"i915_wa_registers", i915_wa_registers, 0},
4830 	{"i915_ddb_info", i915_ddb_info, 0},
4831 	{"i915_sseu_status", i915_sseu_status, 0},
4832 	{"i915_drrs_status", i915_drrs_status, 0},
4833 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4834 };
4835 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4836 
4837 static const struct i915_debugfs_files {
4838 	const char *name;
4839 	const struct file_operations *fops;
4840 } i915_debugfs_files[] = {
4841 	{"i915_wedged", &i915_wedged_fops},
4842 	{"i915_max_freq", &i915_max_freq_fops},
4843 	{"i915_min_freq", &i915_min_freq_fops},
4844 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4845 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4846 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
4847 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4848 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4849 	{"i915_error_state", &i915_error_state_fops},
4850 	{"i915_gpu_info", &i915_gpu_info_fops},
4851 #endif
4852 	{"i915_next_seqno", &i915_next_seqno_fops},
4853 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
4854 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4855 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4856 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4857 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4858 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4859 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4860 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4861 	{"i915_guc_log_control", &i915_guc_log_control_fops},
4862 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}
4863 };
4864 
4865 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4866 {
4867 	struct drm_minor *minor = dev_priv->drm.primary;
4868 	struct dentry *ent;
4869 	int ret, i;
4870 
4871 	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4872 				  minor->debugfs_root, to_i915(minor->dev),
4873 				  &i915_forcewake_fops);
4874 	if (!ent)
4875 		return -ENOMEM;
4876 
4877 	ret = intel_pipe_crc_create(minor);
4878 	if (ret)
4879 		return ret;
4880 
4881 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4882 		ent = debugfs_create_file(i915_debugfs_files[i].name,
4883 					  S_IRUGO | S_IWUSR,
4884 					  minor->debugfs_root,
4885 					  to_i915(minor->dev),
4886 					  i915_debugfs_files[i].fops);
4887 		if (!ent)
4888 			return -ENOMEM;
4889 	}
4890 
4891 	return drm_debugfs_create_files(i915_debugfs_list,
4892 					I915_DEBUGFS_ENTRIES,
4893 					minor->debugfs_root, minor);
4894 }
4895 
4896 struct dpcd_block {
4897 	/* DPCD dump start address. */
4898 	unsigned int offset;
4899 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4900 	unsigned int end;
4901 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4902 	size_t size;
4903 	/* Only valid for eDP. */
4904 	bool edp;
4905 };
4906 
4907 static const struct dpcd_block i915_dpcd_debug[] = {
4908 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4909 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4910 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4911 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4912 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4913 	{ .offset = DP_SET_POWER },
4914 	{ .offset = DP_EDP_DPCD_REV },
4915 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4916 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4917 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4918 };
4919 
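/* Dump each DPCD block listed above for a connected (e)DP connector. */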
4920 static int i915_dpcd_show(struct seq_file *m, void *data)
4921 {
4922 	struct drm_connector *connector = m->private;
4923 	struct intel_dp *intel_dp =
4924 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4925 	uint8_t buf[16];
4926 	ssize_t err;
4927 	int i;
4928 
4929 	if (connector->status != connector_status_connected)
4930 		return -ENODEV;
4931 
4932 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4933 		const struct dpcd_block *b = &i915_dpcd_debug[i];
4934 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4935 
4936 		if (b->edp &&
4937 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4938 			continue;
4939 
4940 		/* low tech for now */
4941 		if (WARN_ON(size > sizeof(buf)))
4942 			continue;
4943 
4944 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4945 		if (err <= 0) {
4946 			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4947 				  size, b->offset, err);
4948 			continue;
4949 		}
4950 
4951 		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
4952 	}
4953 
4954 	return 0;
4955 }
4956 
4957 static int i915_dpcd_open(struct inode *inode, struct file *file)
4958 {
4959 	return single_open(file, i915_dpcd_show, inode->i_private);
4960 }
4961 
4962 static const struct file_operations i915_dpcd_fops = {
4963 	.owner = THIS_MODULE,
4964 	.open = i915_dpcd_open,
4965 	.read = seq_read,
4966 	.llseek = seq_lseek,
4967 	.release = single_release,
4968 };
4969 
4970 static int i915_panel_show(struct seq_file *m, void *data)
4971 {
4972 	struct drm_connector *connector = m->private;
4973 	struct intel_dp *intel_dp =
4974 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4975 
4976 	if (connector->status != connector_status_connected)
4977 		return -ENODEV;
4978 
4979 	seq_printf(m, "Panel power up delay: %d\n",
4980 		   intel_dp->panel_power_up_delay);
4981 	seq_printf(m, "Panel power down delay: %d\n",
4982 		   intel_dp->panel_power_down_delay);
4983 	seq_printf(m, "Backlight on delay: %d\n",
4984 		   intel_dp->backlight_on_delay);
4985 	seq_printf(m, "Backlight off delay: %d\n",
4986 		   intel_dp->backlight_off_delay);
4987 
4988 	return 0;
4989 }
4990 
4991 static int i915_panel_open(struct inode *inode, struct file *file)
4992 {
4993 	return single_open(file, i915_panel_show, inode->i_private);
4994 }
4995 
4996 static const struct file_operations i915_panel_fops = {
4997 	.owner = THIS_MODULE,
4998 	.open = i915_panel_open,
4999 	.read = seq_read,
5000 	.llseek = seq_lseek,
5001 	.release = single_release,
5002 };
5003 
5004 /**
5005  * i915_debugfs_connector_add - add i915 specific connector debugfs files
5006  * @connector: pointer to a registered drm_connector
5007  *
5008  * Cleanup will be done by drm_connector_unregister() through a call to
5009  * drm_debugfs_connector_remove().
5010  *
5011  * Returns 0 on success, negative error codes on error.
5012  */
5013 int i915_debugfs_connector_add(struct drm_connector *connector)
5014 {
5015 	struct dentry *root = connector->debugfs_entry;
5016 
5017 	/* The connector must have been registered beforehand. */
5018 	if (!root)
5019 		return -ENODEV;
5020 
5021 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5022 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5023 		debugfs_create_file("i915_dpcd", S_IRUGO, root,
5024 				    connector, &i915_dpcd_fops);
5025 
5026 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5027 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5028 				    connector, &i915_panel_fops);
5029 
5030 	return 0;
5031 }
5032