xref: /linux/drivers/gpu/drm/i915/display/intel_display_debugfs.c (revision 95298d63c67673c654c08952672d016212b26054)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dp.h"
15 #include "intel_fbc.h"
16 #include "intel_hdcp.h"
17 #include "intel_hdmi.h"
18 #include "intel_pm.h"
19 #include "intel_psr.h"
20 #include "intel_sideband.h"
21 
22 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
23 {
24 	return to_i915(node->minor->dev);
25 }
26 
27 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
28 {
29 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
30 
31 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
32 		   dev_priv->fb_tracking.busy_bits);
33 
34 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
35 		   dev_priv->fb_tracking.flip_bits);
36 
37 	return 0;
38 }
39 
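/*
 * Report FBC state: whether FBC is currently active (or the reason it is
 * disabled) and, when active, whether the hardware is actually compressing,
 * taken from the generation-specific FBC status register.
 */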
40 static int i915_fbc_status(struct seq_file *m, void *unused)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	struct intel_fbc *fbc = &dev_priv->fbc;
44 	intel_wakeref_t wakeref;
45 
46 	if (!HAS_FBC(dev_priv))
47 		return -ENODEV;
48 
49 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
50 	mutex_lock(&fbc->lock);
51 
52 	if (intel_fbc_is_active(dev_priv))
53 		seq_puts(m, "FBC enabled\n");
54 	else
55 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
56 
57 	if (intel_fbc_is_active(dev_priv)) {
58 		u32 mask;
59 
60 		if (INTEL_GEN(dev_priv) >= 8)
61 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
62 		else if (INTEL_GEN(dev_priv) >= 7)
63 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
64 		else if (INTEL_GEN(dev_priv) >= 5)
65 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
66 		else if (IS_G4X(dev_priv))
67 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
68 		else
69 			mask = intel_de_read(dev_priv, FBC_STATUS) &
70 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
71 
72 		seq_printf(m, "Compressing: %s\n", yesno(mask));
73 	}
74 
75 	mutex_unlock(&fbc->lock);
76 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
77 
78 	return 0;
79 }
80 
81 static int i915_fbc_false_color_get(void *data, u64 *val)
82 {
83 	struct drm_i915_private *dev_priv = data;
84 
85 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
86 		return -ENODEV;
87 
88 	*val = dev_priv->fbc.false_color;
89 
90 	return 0;
91 }
92 
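/*
 * Toggle FBC false color mode (gen7+ with FBC only): set or clear
 * FBC_CTL_FALSE_COLOR in ILK_DPFC_CONTROL under the FBC lock so compressed
 * regions can be identified visually.
 *
 * Typical use (debugfs path/name assumed from the usual registration):
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 */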
93 static int i915_fbc_false_color_set(void *data, u64 val)
94 {
95 	struct drm_i915_private *dev_priv = data;
96 	u32 reg;
97 
98 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
99 		return -ENODEV;
100 
101 	mutex_lock(&dev_priv->fbc.lock);
102 
103 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
104 	dev_priv->fbc.false_color = val;
105 
106 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
107 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
108 
109 	mutex_unlock(&dev_priv->fbc.lock);
110 	return 0;
111 }
112 
113 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
114 			i915_fbc_false_color_get, i915_fbc_false_color_set,
115 			"%llu\n");
116 
117 static int i915_ips_status(struct seq_file *m, void *unused)
118 {
119 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
120 	intel_wakeref_t wakeref;
121 
122 	if (!HAS_IPS(dev_priv))
123 		return -ENODEV;
124 
125 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
126 
127 	seq_printf(m, "Enabled by kernel parameter: %s\n",
128 		   yesno(i915_modparams.enable_ips));
129 
130 	if (INTEL_GEN(dev_priv) >= 8) {
131 		seq_puts(m, "Currently: unknown\n");
132 	} else {
133 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
134 			seq_puts(m, "Currently: enabled\n");
135 		else
136 			seq_puts(m, "Currently: disabled\n");
137 	}
138 
139 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
140 
141 	return 0;
142 }
143 
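/*
 * Report legacy self-refresh state. Each pre-gen9 platform has its own
 * enable bit; gen9+ has no single global SR bit, so nothing is checked
 * there and "disabled" is reported.
 */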
144 static int i915_sr_status(struct seq_file *m, void *unused)
145 {
146 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
147 	intel_wakeref_t wakeref;
148 	bool sr_enabled = false;
149 
150 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
151 
152 	if (INTEL_GEN(dev_priv) >= 9)
153 		/* no global SR status; inspect per-plane WM */;
154 	else if (HAS_PCH_SPLIT(dev_priv))
155 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
156 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
157 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
158 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
159 	else if (IS_I915GM(dev_priv))
160 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
161 	else if (IS_PINEVIEW(dev_priv))
162 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
163 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
164 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
165 
166 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
167 
168 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
169 
170 	return 0;
171 }
172 
173 static int i915_opregion(struct seq_file *m, void *unused)
174 {
175 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
176 
177 	if (opregion->header)
178 		seq_write(m, opregion->header, OPREGION_SIZE);
179 
180 	return 0;
181 }
182 
183 static int i915_vbt(struct seq_file *m, void *unused)
184 {
185 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
186 
187 	if (opregion->vbt)
188 		seq_write(m, opregion->vbt, opregion->vbt_size);
189 
190 	return 0;
191 }
192 
193 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
194 {
195 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
196 	struct drm_device *dev = &dev_priv->drm;
197 	struct intel_framebuffer *fbdev_fb = NULL;
198 	struct drm_framebuffer *drm_fb;
199 
200 #ifdef CONFIG_DRM_FBDEV_EMULATION
201 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
202 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
203 
204 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
205 			   fbdev_fb->base.width,
206 			   fbdev_fb->base.height,
207 			   fbdev_fb->base.format->depth,
208 			   fbdev_fb->base.format->cpp[0] * 8,
209 			   fbdev_fb->base.modifier,
210 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
211 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
212 		seq_putc(m, '\n');
213 	}
214 #endif
215 
216 	mutex_lock(&dev->mode_config.fb_lock);
217 	drm_for_each_fb(drm_fb, dev) {
218 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
219 		if (fb == fbdev_fb)
220 			continue;
221 
222 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
223 			   fb->base.width,
224 			   fb->base.height,
225 			   fb->base.format->depth,
226 			   fb->base.format->cpp[0] * 8,
227 			   fb->base.modifier,
228 			   drm_framebuffer_read_refcount(&fb->base));
229 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
230 		seq_putc(m, '\n');
231 	}
232 	mutex_unlock(&dev->mode_config.fb_lock);
233 
234 	return 0;
235 }
236 
237 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
238 {
239 	u8 val;
240 	static const char * const sink_status[] = {
241 		"inactive",
242 		"transition to active, capture and display",
243 		"active, display from RFB",
244 		"active, capture and display on sink device timings",
245 		"transition to inactive, capture and display, timing re-sync",
246 		"reserved",
247 		"reserved",
248 		"sink internal error",
249 	};
250 	struct drm_connector *connector = m->private;
251 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
252 	struct intel_dp *intel_dp =
253 		intel_attached_dp(to_intel_connector(connector));
254 	int ret;
255 
256 	if (!CAN_PSR(dev_priv)) {
257 		seq_puts(m, "PSR Unsupported\n");
258 		return -ENODEV;
259 	}
260 
261 	if (connector->status != connector_status_connected)
262 		return -ENODEV;
263 
264 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
265 
266 	if (ret == 1) {
267 		const char *str = "unknown";
268 
269 		val &= DP_PSR_SINK_STATE_MASK;
270 		if (val < ARRAY_SIZE(sink_status))
271 			str = sink_status[val];
272 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
273 	} else {
274 		return ret;
275 	}
276 
277 	return 0;
278 }
279 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
280 
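/*
 * Decode the source (hardware) PSR state machine from EDP_PSR_STATUS or
 * EDP_PSR2_STATUS, depending on which PSR version is enabled, and print a
 * human-readable state name next to the raw register value.
 */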
281 static void
282 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
283 {
284 	u32 val, status_val;
285 	const char *status = "unknown";
286 
287 	if (dev_priv->psr.psr2_enabled) {
288 		static const char * const live_status[] = {
289 			"IDLE",
290 			"CAPTURE",
291 			"CAPTURE_FS",
292 			"SLEEP",
293 			"BUFON_FW",
294 			"ML_UP",
295 			"SU_STANDBY",
296 			"FAST_SLEEP",
297 			"DEEP_SLEEP",
298 			"BUF_ON",
299 			"TG_ON"
300 		};
301 		val = intel_de_read(dev_priv,
302 				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
303 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
304 			      EDP_PSR2_STATUS_STATE_SHIFT;
305 		if (status_val < ARRAY_SIZE(live_status))
306 			status = live_status[status_val];
307 	} else {
308 		static const char * const live_status[] = {
309 			"IDLE",
310 			"SRDONACK",
311 			"SRDENT",
312 			"BUFOFF",
313 			"BUFON",
314 			"AUXACK",
315 			"SRDOFFACK",
316 			"SRDENT_ON",
317 		};
318 		val = intel_de_read(dev_priv,
319 				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
320 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
321 			      EDP_PSR_STATUS_STATE_SHIFT;
322 		if (status_val < ARRAY_SIZE(live_status))
323 			status = live_status[status_val];
324 	}
325 
326 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
327 }
328 
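/*
 * Top-level PSR status dump: sink support, the PSR mode currently enabled,
 * source control/status registers, busy frontbuffer bits and, where
 * applicable, the HSW/BDW performance counter and the PSR2 selective
 * update block counts.
 */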
329 static int i915_edp_psr_status(struct seq_file *m, void *data)
330 {
331 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
332 	struct i915_psr *psr = &dev_priv->psr;
333 	intel_wakeref_t wakeref;
334 	const char *status;
335 	bool enabled;
336 	u32 val;
337 
338 	if (!HAS_PSR(dev_priv))
339 		return -ENODEV;
340 
341 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
342 	if (psr->dp)
343 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
344 	seq_puts(m, "\n");
345 
346 	if (!psr->sink_support)
347 		return 0;
348 
349 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
350 	mutex_lock(&psr->lock);
351 
352 	if (psr->enabled)
353 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
354 	else
355 		status = "disabled";
356 	seq_printf(m, "PSR mode: %s\n", status);
357 
358 	if (!psr->enabled) {
359 		seq_printf(m, "PSR sink not reliable: %s\n",
360 			   yesno(psr->sink_not_reliable));
361 
362 		goto unlock;
363 	}
364 
365 	if (psr->psr2_enabled) {
366 		val = intel_de_read(dev_priv,
367 				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
368 		enabled = val & EDP_PSR2_ENABLE;
369 	} else {
370 		val = intel_de_read(dev_priv,
371 				    EDP_PSR_CTL(dev_priv->psr.transcoder));
372 		enabled = val & EDP_PSR_ENABLE;
373 	}
374 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
375 		   enableddisabled(enabled), val);
376 	psr_source_status(dev_priv, m);
377 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
378 		   psr->busy_frontbuffer_bits);
379 
380 	/*
381 	 * SKL+ Perf counter is reset to 0 every time DC state is entered
382 	 */
383 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
384 		val = intel_de_read(dev_priv,
385 				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
386 		val &= EDP_PSR_PERF_CNT_MASK;
387 		seq_printf(m, "Performance counter: %u\n", val);
388 	}
389 
390 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
391 		seq_printf(m, "Last attempted entry at: %lld\n",
392 			   psr->last_entry_attempt);
393 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
394 	}
395 
396 	if (psr->psr2_enabled) {
397 		u32 su_frames_val[3];
398 		int frame;
399 
400 		/*
401 		 * Read all 3 registers beforehand to minimize the chance of
402 		 * crossing a frame boundary between the register reads
403 		 */
404 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
405 			val = intel_de_read(dev_priv,
406 					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
407 			su_frames_val[frame / 3] = val;
408 		}
409 
410 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
411 
412 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
413 			u32 su_blocks;
414 
415 			su_blocks = su_frames_val[frame / 3] &
416 				    PSR2_SU_STATUS_MASK(frame);
417 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
418 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
419 		}
420 	}
421 
422 unlock:
423 	mutex_unlock(&psr->lock);
424 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
425 
426 	return 0;
427 }
428 
429 static int
430 i915_edp_psr_debug_set(void *data, u64 val)
431 {
432 	struct drm_i915_private *dev_priv = data;
433 	intel_wakeref_t wakeref;
434 	int ret;
435 
436 	if (!CAN_PSR(dev_priv))
437 		return -ENODEV;
438 
439 	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
440 
441 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
442 
443 	ret = intel_psr_debug_set(dev_priv, val);
444 
445 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
446 
447 	return ret;
448 }
449 
450 static int
451 i915_edp_psr_debug_get(void *data, u64 *val)
452 {
453 	struct drm_i915_private *dev_priv = data;
454 
455 	if (!CAN_PSR(dev_priv))
456 		return -ENODEV;
457 
458 	*val = READ_ONCE(dev_priv->psr.debug);
459 	return 0;
460 }
461 
462 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
463 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
464 			"%llu\n");
465 
466 static int i915_power_domain_info(struct seq_file *m, void *unused)
467 {
468 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
469 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
470 	int i;
471 
472 	mutex_lock(&power_domains->lock);
473 
474 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
475 	for (i = 0; i < power_domains->power_well_count; i++) {
476 		struct i915_power_well *power_well;
477 		enum intel_display_power_domain power_domain;
478 
479 		power_well = &power_domains->power_wells[i];
480 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
481 			   power_well->count);
482 
483 		for_each_power_domain(power_domain, power_well->desc->domains)
484 			seq_printf(m, "  %-23s %d\n",
485 				 intel_display_power_domain_str(power_domain),
486 				 power_domains->domain_use_count[power_domain]);
487 	}
488 
489 	mutex_unlock(&power_domains->lock);
490 
491 	return 0;
492 }
493 
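/*
 * Dump DMC/CSR firmware state: whether the firmware is loaded, its path and
 * version, the DC state transition counters (register selection is
 * platform dependent) and the program/SSP/HTP base registers.
 */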
494 static int i915_dmc_info(struct seq_file *m, void *unused)
495 {
496 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
497 	intel_wakeref_t wakeref;
498 	struct intel_csr *csr;
499 	i915_reg_t dc5_reg, dc6_reg = {};
500 
501 	if (!HAS_CSR(dev_priv))
502 		return -ENODEV;
503 
504 	csr = &dev_priv->csr;
505 
506 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
507 
508 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
509 	seq_printf(m, "path: %s\n", csr->fw_path);
510 
511 	if (!csr->dmc_payload)
512 		goto out;
513 
514 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
515 		   CSR_VERSION_MINOR(csr->version));
516 
517 	if (INTEL_GEN(dev_priv) >= 12) {
518 		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
519 		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
520 		/*
521 		 * NOTE: DMC_DEBUG3 is a general purpose reg.
522 		 * According to B.Specs:49196, the DMC f/w reuses the DC5/6
523 		 * counter reg for DC3CO debugging and validation, but the TGL
524 		 * DMC f/w uses the DMC_DEBUG3 reg for the DC3CO counter instead.
525 		 */
526 		seq_printf(m, "DC3CO count: %d\n",
527 			   intel_de_read(dev_priv, DMC_DEBUG3));
528 	} else {
529 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
530 						 SKL_CSR_DC3_DC5_COUNT;
531 		if (!IS_GEN9_LP(dev_priv))
532 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
533 	}
534 
535 	seq_printf(m, "DC3 -> DC5 count: %d\n",
536 		   intel_de_read(dev_priv, dc5_reg));
537 	if (dc6_reg.reg)
538 		seq_printf(m, "DC5 -> DC6 count: %d\n",
539 			   intel_de_read(dev_priv, dc6_reg));
540 
541 out:
542 	seq_printf(m, "program base: 0x%08x\n",
543 		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
544 	seq_printf(m, "ssp base: 0x%08x\n",
545 		   intel_de_read(dev_priv, CSR_SSP_BASE));
546 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
547 
548 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
549 
550 	return 0;
551 }
552 
553 static void intel_seq_print_mode(struct seq_file *m, int tabs,
554 				 const struct drm_display_mode *mode)
555 {
556 	int i;
557 
558 	for (i = 0; i < tabs; i++)
559 		seq_putc(m, '\t');
560 
561 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
562 }
563 
564 static void intel_encoder_info(struct seq_file *m,
565 			       struct intel_crtc *crtc,
566 			       struct intel_encoder *encoder)
567 {
568 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
569 	struct drm_connector_list_iter conn_iter;
570 	struct drm_connector *connector;
571 
572 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
573 		   encoder->base.base.id, encoder->base.name);
574 
575 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
576 	drm_for_each_connector_iter(connector, &conn_iter) {
577 		const struct drm_connector_state *conn_state =
578 			connector->state;
579 
580 		if (conn_state->best_encoder != &encoder->base)
581 			continue;
582 
583 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
584 			   connector->base.id, connector->name);
585 	}
586 	drm_connector_list_iter_end(&conn_iter);
587 }
588 
589 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
590 {
591 	const struct drm_display_mode *mode = panel->fixed_mode;
592 
593 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
594 }
595 
596 static void intel_hdcp_info(struct seq_file *m,
597 			    struct intel_connector *intel_connector)
598 {
599 	bool hdcp_cap, hdcp2_cap;
600 
601 	hdcp_cap = intel_hdcp_capable(intel_connector);
602 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
603 
604 	if (hdcp_cap)
605 		seq_puts(m, "HDCP1.4 ");
606 	if (hdcp2_cap)
607 		seq_puts(m, "HDCP2.2 ");
608 
609 	if (!hdcp_cap && !hdcp2_cap)
610 		seq_puts(m, "None");
611 
612 	seq_puts(m, "\n");
613 }
614 
615 static void intel_dp_info(struct seq_file *m,
616 			  struct intel_connector *intel_connector)
617 {
618 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
619 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
620 
621 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
622 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
623 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
624 		intel_panel_info(m, &intel_connector->panel);
625 
626 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
627 				&intel_dp->aux);
628 	if (intel_connector->hdcp.shim) {
629 		seq_puts(m, "\tHDCP version: ");
630 		intel_hdcp_info(m, intel_connector);
631 	}
632 }
633 
634 static void intel_dp_mst_info(struct seq_file *m,
635 			      struct intel_connector *intel_connector)
636 {
637 	bool has_audio = intel_connector->port->has_audio;
638 
639 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
640 }
641 
642 static void intel_hdmi_info(struct seq_file *m,
643 			    struct intel_connector *intel_connector)
644 {
645 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
646 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
647 
648 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
649 	if (intel_connector->hdcp.shim) {
650 		seq_puts(m, "\tHDCP version: ");
651 		intel_hdcp_info(m, intel_connector);
652 	}
653 }
654 
655 static void intel_lvds_info(struct seq_file *m,
656 			    struct intel_connector *intel_connector)
657 {
658 	intel_panel_info(m, &intel_connector->panel);
659 }
660 
661 static void intel_connector_info(struct seq_file *m,
662 				 struct drm_connector *connector)
663 {
664 	struct intel_connector *intel_connector = to_intel_connector(connector);
665 	const struct drm_connector_state *conn_state = connector->state;
666 	struct intel_encoder *encoder =
667 		to_intel_encoder(conn_state->best_encoder);
668 	const struct drm_display_mode *mode;
669 
670 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
671 		   connector->base.id, connector->name,
672 		   drm_get_connector_status_name(connector->status));
673 
674 	if (connector->status == connector_status_disconnected)
675 		return;
676 
677 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
678 		   connector->display_info.width_mm,
679 		   connector->display_info.height_mm);
680 	seq_printf(m, "\tsubpixel order: %s\n",
681 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
682 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
683 
684 	if (!encoder)
685 		return;
686 
687 	switch (connector->connector_type) {
688 	case DRM_MODE_CONNECTOR_DisplayPort:
689 	case DRM_MODE_CONNECTOR_eDP:
690 		if (encoder->type == INTEL_OUTPUT_DP_MST)
691 			intel_dp_mst_info(m, intel_connector);
692 		else
693 			intel_dp_info(m, intel_connector);
694 		break;
695 	case DRM_MODE_CONNECTOR_LVDS:
696 		if (encoder->type == INTEL_OUTPUT_LVDS)
697 			intel_lvds_info(m, intel_connector);
698 		break;
699 	case DRM_MODE_CONNECTOR_HDMIA:
700 		if (encoder->type == INTEL_OUTPUT_HDMI ||
701 		    encoder->type == INTEL_OUTPUT_DDI)
702 			intel_hdmi_info(m, intel_connector);
703 		break;
704 	default:
705 		break;
706 	}
707 
708 	seq_printf(m, "\tmodes:\n");
709 	list_for_each_entry(mode, &connector->modes, head)
710 		intel_seq_print_mode(m, 2, mode);
711 }
712 
713 static const char *plane_type(enum drm_plane_type type)
714 {
715 	switch (type) {
716 	case DRM_PLANE_TYPE_OVERLAY:
717 		return "OVL";
718 	case DRM_PLANE_TYPE_PRIMARY:
719 		return "PRI";
720 	case DRM_PLANE_TYPE_CURSOR:
721 		return "CUR";
722 	/*
723 	 * Deliberately omitting default: to generate compiler warnings
724 	 * when a new drm_plane_type gets added.
725 	 */
726 	}
727 
728 	return "unknown";
729 }
730 
731 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
732 {
733 	/*
734 	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
735 	 * but print them all to make any misuse of the values visible
736 	 */
737 	snprintf(buf, bufsize,
738 		 "%s%s%s%s%s%s(0x%08x)",
739 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
740 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
741 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
742 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
743 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
744 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
745 		 rotation);
746 }
747 
748 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
749 {
750 	const struct intel_plane_state *plane_state =
751 		to_intel_plane_state(plane->base.state);
752 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
753 	struct drm_format_name_buf format_name;
754 	struct drm_rect src, dst;
755 	char rot_str[48];
756 
757 	src = drm_plane_state_src(&plane_state->uapi);
758 	dst = drm_plane_state_dest(&plane_state->uapi);
759 
760 	if (fb)
761 		drm_get_format_name(fb->format->format, &format_name);
762 
763 	plane_rotation(rot_str, sizeof(rot_str),
764 		       plane_state->uapi.rotation);
765 
766 	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
767 		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
768 		   fb ? fb->width : 0, fb ? fb->height : 0,
769 		   DRM_RECT_FP_ARG(&src),
770 		   DRM_RECT_ARG(&dst),
771 		   rot_str);
772 }
773 
774 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
775 {
776 	const struct intel_plane_state *plane_state =
777 		to_intel_plane_state(plane->base.state);
778 	const struct drm_framebuffer *fb = plane_state->hw.fb;
779 	struct drm_format_name_buf format_name;
780 	char rot_str[48];
781 
782 	if (!fb)
783 		return;
784 
785 	drm_get_format_name(fb->format->format, &format_name);
786 
787 	plane_rotation(rot_str, sizeof(rot_str),
788 		       plane_state->hw.rotation);
789 
790 	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
791 		   fb->base.id, format_name.str,
792 		   fb->width, fb->height,
793 		   yesno(plane_state->uapi.visible),
794 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
795 		   DRM_RECT_ARG(&plane_state->uapi.dst),
796 		   rot_str);
797 }
798 
799 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
800 {
801 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
802 	struct intel_plane *plane;
803 
804 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
805 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
806 			   plane->base.base.id, plane->base.name,
807 			   plane_type(plane->base.type));
808 		intel_plane_uapi_info(m, plane);
809 		intel_plane_hw_info(m, plane);
810 	}
811 }
812 
813 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
814 {
815 	const struct intel_crtc_state *crtc_state =
816 		to_intel_crtc_state(crtc->base.state);
817 	int num_scalers = crtc->num_scalers;
818 	int i;
819 
820 	/* Not all platforms have a scaler */
821 	if (num_scalers) {
822 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
823 			   num_scalers,
824 			   crtc_state->scaler_state.scaler_users,
825 			   crtc_state->scaler_state.scaler_id);
826 
827 		for (i = 0; i < num_scalers; i++) {
828 			const struct intel_scaler *sc =
829 				&crtc_state->scaler_state.scalers[i];
830 
831 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
832 				   i, yesno(sc->in_use), sc->mode);
833 		}
834 		seq_puts(m, "\n");
835 	} else {
836 		seq_puts(m, "\tNo scalers available on this platform\n");
837 	}
838 }
839 
840 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
841 {
842 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
843 	const struct intel_crtc_state *crtc_state =
844 		to_intel_crtc_state(crtc->base.state);
845 	struct intel_encoder *encoder;
846 
847 	seq_printf(m, "[CRTC:%d:%s]:\n",
848 		   crtc->base.base.id, crtc->base.name);
849 
850 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
851 		   yesno(crtc_state->uapi.enable),
852 		   yesno(crtc_state->uapi.active),
853 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
854 
855 	if (crtc_state->hw.enable) {
856 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
857 			   yesno(crtc_state->hw.active),
858 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
859 
860 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
861 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
862 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
863 
864 		intel_scaler_info(m, crtc);
865 	}
866 
867 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
868 				    crtc_state->uapi.encoder_mask)
869 		intel_encoder_info(m, crtc, encoder);
870 
871 	intel_plane_info(m, crtc);
872 
873 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
874 		   yesno(!crtc->cpu_fifo_underrun_disabled),
875 		   yesno(!crtc->pch_fifo_underrun_disabled));
876 }
877 
878 static int i915_display_info(struct seq_file *m, void *unused)
879 {
880 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
881 	struct drm_device *dev = &dev_priv->drm;
882 	struct intel_crtc *crtc;
883 	struct drm_connector *connector;
884 	struct drm_connector_list_iter conn_iter;
885 	intel_wakeref_t wakeref;
886 
887 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
888 
889 	drm_modeset_lock_all(dev);
890 
891 	seq_puts(m, "CRTC info\n");
892 	seq_puts(m, "---------\n");
893 	for_each_intel_crtc(dev, crtc)
894 		intel_crtc_info(m, crtc);
895 
896 	seq_puts(m, "\n");
897 	seq_puts(m, "Connector info\n");
898 	seq_puts(m, "--------------\n");
899 	drm_connector_list_iter_begin(dev, &conn_iter);
900 	drm_for_each_connector_iter(connector, &conn_iter)
901 		intel_connector_info(m, connector);
902 	drm_connector_list_iter_end(&conn_iter);
903 
904 	drm_modeset_unlock_all(dev);
905 
906 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
907 
908 	return 0;
909 }
910 
911 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
912 {
913 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
914 	struct drm_device *dev = &dev_priv->drm;
915 	int i;
916 
917 	drm_modeset_lock_all(dev);
918 
919 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
920 		   dev_priv->dpll.ref_clks.nssc,
921 		   dev_priv->dpll.ref_clks.ssc);
922 
923 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
924 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
925 
926 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
927 			   pll->info->id);
928 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
929 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
930 		seq_printf(m, " tracked hardware state:\n");
931 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
932 		seq_printf(m, " dpll_md: 0x%08x\n",
933 			   pll->state.hw_state.dpll_md);
934 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
935 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
936 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
937 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
938 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
939 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
940 			   pll->state.hw_state.mg_refclkin_ctl);
941 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
942 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
943 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
944 			   pll->state.hw_state.mg_clktop2_hsclkctl);
945 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
946 			   pll->state.hw_state.mg_pll_div0);
947 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
948 			   pll->state.hw_state.mg_pll_div1);
949 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
950 			   pll->state.hw_state.mg_pll_lf);
951 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
952 			   pll->state.hw_state.mg_pll_frac_lock);
953 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
954 			   pll->state.hw_state.mg_pll_ssc);
955 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
956 			   pll->state.hw_state.mg_pll_bias);
957 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
958 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
959 	}
960 	drm_modeset_unlock_all(dev);
961 
962 	return 0;
963 }
964 
965 static int i915_ipc_status_show(struct seq_file *m, void *data)
966 {
967 	struct drm_i915_private *dev_priv = m->private;
968 
969 	seq_printf(m, "Isochronous Priority Control: %s\n",
970 			yesno(dev_priv->ipc_enabled));
971 	return 0;
972 }
973 
974 static int i915_ipc_status_open(struct inode *inode, struct file *file)
975 {
976 	struct drm_i915_private *dev_priv = inode->i_private;
977 
978 	if (!HAS_IPC(dev_priv))
979 		return -ENODEV;
980 
981 	return single_open(file, i915_ipc_status_show, dev_priv);
982 }
983 
984 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
985 				     size_t len, loff_t *offp)
986 {
987 	struct seq_file *m = file->private_data;
988 	struct drm_i915_private *dev_priv = m->private;
989 	intel_wakeref_t wakeref;
990 	bool enable;
991 	int ret;
992 
993 	ret = kstrtobool_from_user(ubuf, len, &enable);
994 	if (ret < 0)
995 		return ret;
996 
997 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
998 		if (!dev_priv->ipc_enabled && enable)
999 			drm_info(&dev_priv->drm,
1000 				 "Enabling IPC: WM will be proper only after next commit\n");
1001 		dev_priv->wm.distrust_bios_wm = true;
1002 		dev_priv->ipc_enabled = enable;
1003 		intel_enable_ipc(dev_priv);
1004 	}
1005 
1006 	return len;
1007 }
1008 
1009 static const struct file_operations i915_ipc_status_fops = {
1010 	.owner = THIS_MODULE,
1011 	.open = i915_ipc_status_open,
1012 	.read = seq_read,
1013 	.llseek = seq_lseek,
1014 	.release = single_release,
1015 	.write = i915_ipc_status_write
1016 };
1017 
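/*
 * Print the SKL+ display data buffer (DDB) allocation: start, end and size
 * of each plane's and the cursor's DDB entry, per pipe.
 */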
1018 static int i915_ddb_info(struct seq_file *m, void *unused)
1019 {
1020 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1021 	struct drm_device *dev = &dev_priv->drm;
1022 	struct skl_ddb_entry *entry;
1023 	struct intel_crtc *crtc;
1024 
1025 	if (INTEL_GEN(dev_priv) < 9)
1026 		return -ENODEV;
1027 
1028 	drm_modeset_lock_all(dev);
1029 
1030 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1031 
1032 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1033 		struct intel_crtc_state *crtc_state =
1034 			to_intel_crtc_state(crtc->base.state);
1035 		enum pipe pipe = crtc->pipe;
1036 		enum plane_id plane_id;
1037 
1038 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1039 
1040 		for_each_plane_id_on_crtc(crtc, plane_id) {
1041 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1042 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1043 				   entry->start, entry->end,
1044 				   skl_ddb_entry_size(entry));
1045 		}
1046 
1047 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1048 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1049 			   entry->end, skl_ddb_entry_size(entry));
1050 	}
1051 
1052 	drm_modeset_unlock_all(dev);
1053 
1054 	return 0;
1055 }
1056 
1057 static void drrs_status_per_crtc(struct seq_file *m,
1058 				 struct drm_device *dev,
1059 				 struct intel_crtc *intel_crtc)
1060 {
1061 	struct drm_i915_private *dev_priv = to_i915(dev);
1062 	struct i915_drrs *drrs = &dev_priv->drrs;
1063 	int vrefresh = 0;
1064 	struct drm_connector *connector;
1065 	struct drm_connector_list_iter conn_iter;
1066 
1067 	drm_connector_list_iter_begin(dev, &conn_iter);
1068 	drm_for_each_connector_iter(connector, &conn_iter) {
1069 		if (connector->state->crtc != &intel_crtc->base)
1070 			continue;
1071 
1072 		seq_printf(m, "%s:\n", connector->name);
1073 	}
1074 	drm_connector_list_iter_end(&conn_iter);
1075 
1076 	seq_puts(m, "\n");
1077 
1078 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1079 		struct intel_panel *panel;
1080 
1081 		mutex_lock(&drrs->mutex);
1082 		/* DRRS Supported */
1083 		seq_puts(m, "\tDRRS Supported: Yes\n");
1084 
1085 		/* disable_drrs() will make drrs->dp NULL */
1086 		if (!drrs->dp) {
1087 			seq_puts(m, "Idleness DRRS: Disabled\n");
1088 			if (dev_priv->psr.enabled)
1089 				seq_puts(m,
1090 				"\tAs PSR is enabled, DRRS is not enabled\n");
1091 			mutex_unlock(&drrs->mutex);
1092 			return;
1093 		}
1094 
1095 		panel = &drrs->dp->attached_connector->panel;
1096 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1097 					drrs->busy_frontbuffer_bits);
1098 
1099 		seq_puts(m, "\n\t\t");
1100 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1101 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1102 			vrefresh = panel->fixed_mode->vrefresh;
1103 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1104 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1105 			vrefresh = panel->downclock_mode->vrefresh;
1106 		} else {
1107 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1108 						drrs->refresh_rate_type);
1109 			mutex_unlock(&drrs->mutex);
1110 			return;
1111 		}
1112 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1113 
1114 		seq_puts(m, "\n\t\t");
1115 		mutex_unlock(&drrs->mutex);
1116 	} else {
1117 		/* DRRS not supported */
1118 		seq_puts(m, "\tDRRS Supported : No");
1119 	}
1120 	seq_puts(m, "\n");
1121 }
1122 
1123 static int i915_drrs_status(struct seq_file *m, void *unused)
1124 {
1125 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1126 	struct drm_device *dev = &dev_priv->drm;
1127 	struct intel_crtc *intel_crtc;
1128 	int active_crtc_cnt = 0;
1129 
1130 	drm_modeset_lock_all(dev);
1131 	for_each_intel_crtc(dev, intel_crtc) {
1132 		if (intel_crtc->base.state->active) {
1133 			active_crtc_cnt++;
1134 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1135 
1136 			drrs_status_per_crtc(m, dev, intel_crtc);
1137 		}
1138 	}
1139 	drm_modeset_unlock_all(dev);
1140 
1141 	if (!active_crtc_cnt)
1142 		seq_puts(m, "No active crtc found\n");
1143 
1144 	return 0;
1145 }
1146 
1147 #define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
1148 				seq_puts(m, "LPSP: disabled\n"))
1149 
1150 static bool
1151 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1152 			      enum i915_power_well_id power_well_id)
1153 {
1154 	intel_wakeref_t wakeref;
1155 	bool is_enabled;
1156 
1157 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1158 	is_enabled = intel_display_power_well_is_enabled(i915,
1159 							 power_well_id);
1160 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1161 
1162 	return is_enabled;
1163 }
1164 
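/*
 * Report whether the display is in the low power single pipe (LPSP) state,
 * inferred from the relevant power well being powered down on each
 * platform generation.
 */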
1165 static int i915_lpsp_status(struct seq_file *m, void *unused)
1166 {
1167 	struct drm_i915_private *i915 = node_to_i915(m->private);
1168 
1169 	switch (INTEL_GEN(i915)) {
1170 	case 12:
1171 	case 11:
1172 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1173 		break;
1174 	case 10:
1175 	case 9:
1176 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1177 		break;
1178 	default:
1179 		/*
1180 		 * Apart from HASWELL/BROADWELL, no other legacy platform
1181 		 * supports LPSP.
1182 		 */
1183 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1184 			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1185 		else
1186 			seq_puts(m, "LPSP: not supported\n");
1187 	}
1188 
1189 	return 0;
1190 }
1191 
1192 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1193 {
1194 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1195 	struct drm_device *dev = &dev_priv->drm;
1196 	struct intel_encoder *intel_encoder;
1197 	struct intel_digital_port *intel_dig_port;
1198 	struct drm_connector *connector;
1199 	struct drm_connector_list_iter conn_iter;
1200 
1201 	drm_connector_list_iter_begin(dev, &conn_iter);
1202 	drm_for_each_connector_iter(connector, &conn_iter) {
1203 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1204 			continue;
1205 
1206 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1207 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1208 			continue;
1209 
1210 		intel_dig_port = enc_to_dig_port(intel_encoder);
1211 		if (!intel_dig_port->dp.can_mst)
1212 			continue;
1213 
1214 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1215 			   intel_dig_port->base.base.base.id,
1216 			   intel_dig_port->base.base.name);
1217 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
1218 	}
1219 	drm_connector_list_iter_end(&conn_iter);
1220 
1221 	return 0;
1222 }
1223 
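/*
 * Write handler for the DP compliance "test active" file: walk all
 * non-MST DisplayPort connectors and, for the connected ones, set
 * compliance.test_active when the user writes "1"; any other value
 * clears it.
 */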
1224 static ssize_t i915_displayport_test_active_write(struct file *file,
1225 						  const char __user *ubuf,
1226 						  size_t len, loff_t *offp)
1227 {
1228 	char *input_buffer;
1229 	int status = 0;
1230 	struct drm_device *dev;
1231 	struct drm_connector *connector;
1232 	struct drm_connector_list_iter conn_iter;
1233 	struct intel_dp *intel_dp;
1234 	int val = 0;
1235 
1236 	dev = ((struct seq_file *)file->private_data)->private;
1237 
1238 	if (len == 0)
1239 		return 0;
1240 
1241 	input_buffer = memdup_user_nul(ubuf, len);
1242 	if (IS_ERR(input_buffer))
1243 		return PTR_ERR(input_buffer);
1244 
1245 	drm_dbg(&to_i915(dev)->drm,
1246 		"Copied %d bytes from user\n", (unsigned int)len);
1247 
1248 	drm_connector_list_iter_begin(dev, &conn_iter);
1249 	drm_for_each_connector_iter(connector, &conn_iter) {
1250 		struct intel_encoder *encoder;
1251 
1252 		if (connector->connector_type !=
1253 		    DRM_MODE_CONNECTOR_DisplayPort)
1254 			continue;
1255 
1256 		encoder = to_intel_encoder(connector->encoder);
1257 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1258 			continue;
1259 
1260 		if (encoder && connector->status == connector_status_connected) {
1261 			intel_dp = enc_to_intel_dp(encoder);
1262 			status = kstrtoint(input_buffer, 10, &val);
1263 			if (status < 0)
1264 				break;
1265 			drm_dbg(&to_i915(dev)->drm,
1266 				"Got %d for test active\n", val);
1267 			/* To prevent erroneous activation of the compliance
1268 			 * testing code, only accept an actual value of 1 here
1269 			 */
1270 			if (val == 1)
1271 				intel_dp->compliance.test_active = true;
1272 			else
1273 				intel_dp->compliance.test_active = false;
1274 		}
1275 	}
1276 	drm_connector_list_iter_end(&conn_iter);
1277 	kfree(input_buffer);
1278 	if (status < 0)
1279 		return status;
1280 
1281 	*offp += len;
1282 	return len;
1283 }
1284 
1285 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1286 {
1287 	struct drm_i915_private *dev_priv = m->private;
1288 	struct drm_device *dev = &dev_priv->drm;
1289 	struct drm_connector *connector;
1290 	struct drm_connector_list_iter conn_iter;
1291 	struct intel_dp *intel_dp;
1292 
1293 	drm_connector_list_iter_begin(dev, &conn_iter);
1294 	drm_for_each_connector_iter(connector, &conn_iter) {
1295 		struct intel_encoder *encoder;
1296 
1297 		if (connector->connector_type !=
1298 		    DRM_MODE_CONNECTOR_DisplayPort)
1299 			continue;
1300 
1301 		encoder = to_intel_encoder(connector->encoder);
1302 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1303 			continue;
1304 
1305 		if (encoder && connector->status == connector_status_connected) {
1306 			intel_dp = enc_to_intel_dp(encoder);
1307 			if (intel_dp->compliance.test_active)
1308 				seq_puts(m, "1");
1309 			else
1310 				seq_puts(m, "0");
1311 		} else
1312 			seq_puts(m, "0");
1313 	}
1314 	drm_connector_list_iter_end(&conn_iter);
1315 
1316 	return 0;
1317 }
1318 
1319 static int i915_displayport_test_active_open(struct inode *inode,
1320 					     struct file *file)
1321 {
1322 	return single_open(file, i915_displayport_test_active_show,
1323 			   inode->i_private);
1324 }
1325 
1326 static const struct file_operations i915_displayport_test_active_fops = {
1327 	.owner = THIS_MODULE,
1328 	.open = i915_displayport_test_active_open,
1329 	.read = seq_read,
1330 	.llseek = seq_lseek,
1331 	.release = single_release,
1332 	.write = i915_displayport_test_active_write
1333 };
1334 
1335 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1336 {
1337 	struct drm_i915_private *dev_priv = m->private;
1338 	struct drm_device *dev = &dev_priv->drm;
1339 	struct drm_connector *connector;
1340 	struct drm_connector_list_iter conn_iter;
1341 	struct intel_dp *intel_dp;
1342 
1343 	drm_connector_list_iter_begin(dev, &conn_iter);
1344 	drm_for_each_connector_iter(connector, &conn_iter) {
1345 		struct intel_encoder *encoder;
1346 
1347 		if (connector->connector_type !=
1348 		    DRM_MODE_CONNECTOR_DisplayPort)
1349 			continue;
1350 
1351 		encoder = to_intel_encoder(connector->encoder);
1352 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1353 			continue;
1354 
1355 		if (encoder && connector->status == connector_status_connected) {
1356 			intel_dp = enc_to_intel_dp(encoder);
1357 			if (intel_dp->compliance.test_type ==
1358 			    DP_TEST_LINK_EDID_READ)
1359 				seq_printf(m, "%lx",
1360 					   intel_dp->compliance.test_data.edid);
1361 			else if (intel_dp->compliance.test_type ==
1362 				 DP_TEST_LINK_VIDEO_PATTERN) {
1363 				seq_printf(m, "hdisplay: %d\n",
1364 					   intel_dp->compliance.test_data.hdisplay);
1365 				seq_printf(m, "vdisplay: %d\n",
1366 					   intel_dp->compliance.test_data.vdisplay);
1367 				seq_printf(m, "bpc: %u\n",
1368 					   intel_dp->compliance.test_data.bpc);
1369 			} else if (intel_dp->compliance.test_type ==
1370 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1371 				seq_printf(m, "pattern: %d\n",
1372 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1373 				seq_printf(m, "Number of lanes: %d\n",
1374 					   intel_dp->compliance.test_data.phytest.num_lanes);
1375 				seq_printf(m, "Link Rate: %d\n",
1376 					   intel_dp->compliance.test_data.phytest.link_rate);
1377 				seq_printf(m, "level: %02x\n",
1378 					   intel_dp->train_set[0]);
1379 			}
1380 		} else
1381 			seq_puts(m, "0");
1382 	}
1383 	drm_connector_list_iter_end(&conn_iter);
1384 
1385 	return 0;
1386 }
1387 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1388 
1389 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1390 {
1391 	struct drm_i915_private *dev_priv = m->private;
1392 	struct drm_device *dev = &dev_priv->drm;
1393 	struct drm_connector *connector;
1394 	struct drm_connector_list_iter conn_iter;
1395 	struct intel_dp *intel_dp;
1396 
1397 	drm_connector_list_iter_begin(dev, &conn_iter);
1398 	drm_for_each_connector_iter(connector, &conn_iter) {
1399 		struct intel_encoder *encoder;
1400 
1401 		if (connector->connector_type !=
1402 		    DRM_MODE_CONNECTOR_DisplayPort)
1403 			continue;
1404 
1405 		encoder = to_intel_encoder(connector->encoder);
1406 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1407 			continue;
1408 
1409 		if (encoder && connector->status == connector_status_connected) {
1410 			intel_dp = enc_to_intel_dp(encoder);
1411 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1412 		} else
1413 			seq_puts(m, "0");
1414 	}
1415 	drm_connector_list_iter_end(&conn_iter);
1416 
1417 	return 0;
1418 }
1419 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1420 
1421 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1422 {
1423 	struct drm_i915_private *dev_priv = m->private;
1424 	struct drm_device *dev = &dev_priv->drm;
1425 	int level;
1426 	int num_levels;
1427 
1428 	if (IS_CHERRYVIEW(dev_priv))
1429 		num_levels = 3;
1430 	else if (IS_VALLEYVIEW(dev_priv))
1431 		num_levels = 1;
1432 	else if (IS_G4X(dev_priv))
1433 		num_levels = 3;
1434 	else
1435 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1436 
1437 	drm_modeset_lock_all(dev);
1438 
1439 	for (level = 0; level < num_levels; level++) {
1440 		unsigned int latency = wm[level];
1441 
1442 		/*
1443 		 * - WM1+ latency values are in 0.5us units
1444 		 * - latencies are in us on gen9/vlv/chv/g4x
1445 		 */
1446 		if (INTEL_GEN(dev_priv) >= 9 ||
1447 		    IS_VALLEYVIEW(dev_priv) ||
1448 		    IS_CHERRYVIEW(dev_priv) ||
1449 		    IS_G4X(dev_priv))
1450 			latency *= 10;
1451 		else if (level > 0)
1452 			latency *= 5;
1453 
1454 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1455 			   level, wm[level], latency / 10, latency % 10);
1456 	}
1457 
1458 	drm_modeset_unlock_all(dev);
1459 }
1460 
1461 static int pri_wm_latency_show(struct seq_file *m, void *data)
1462 {
1463 	struct drm_i915_private *dev_priv = m->private;
1464 	const u16 *latencies;
1465 
1466 	if (INTEL_GEN(dev_priv) >= 9)
1467 		latencies = dev_priv->wm.skl_latency;
1468 	else
1469 		latencies = dev_priv->wm.pri_latency;
1470 
1471 	wm_latency_show(m, latencies);
1472 
1473 	return 0;
1474 }
1475 
1476 static int spr_wm_latency_show(struct seq_file *m, void *data)
1477 {
1478 	struct drm_i915_private *dev_priv = m->private;
1479 	const u16 *latencies;
1480 
1481 	if (INTEL_GEN(dev_priv) >= 9)
1482 		latencies = dev_priv->wm.skl_latency;
1483 	else
1484 		latencies = dev_priv->wm.spr_latency;
1485 
1486 	wm_latency_show(m, latencies);
1487 
1488 	return 0;
1489 }
1490 
1491 static int cur_wm_latency_show(struct seq_file *m, void *data)
1492 {
1493 	struct drm_i915_private *dev_priv = m->private;
1494 	const u16 *latencies;
1495 
1496 	if (INTEL_GEN(dev_priv) >= 9)
1497 		latencies = dev_priv->wm.skl_latency;
1498 	else
1499 		latencies = dev_priv->wm.cur_latency;
1500 
1501 	wm_latency_show(m, latencies);
1502 
1503 	return 0;
1504 }
1505 
1506 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1507 {
1508 	struct drm_i915_private *dev_priv = inode->i_private;
1509 
1510 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1511 		return -ENODEV;
1512 
1513 	return single_open(file, pri_wm_latency_show, dev_priv);
1514 }
1515 
1516 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1517 {
1518 	struct drm_i915_private *dev_priv = inode->i_private;
1519 
1520 	if (HAS_GMCH(dev_priv))
1521 		return -ENODEV;
1522 
1523 	return single_open(file, spr_wm_latency_show, dev_priv);
1524 }
1525 
1526 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1527 {
1528 	struct drm_i915_private *dev_priv = inode->i_private;
1529 
1530 	if (HAS_GMCH(dev_priv))
1531 		return -ENODEV;
1532 
1533 	return single_open(file, cur_wm_latency_show, dev_priv);
1534 }
1535 
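/*
 * Common write handler for the pri/spr/cur watermark latency files: parse
 * exactly one value per WM level supported by the platform and store them
 * into @wm under the modeset locks.
 *
 * Example for a platform with eight WM levels (debugfs path/name assumed
 * from the usual registration):
 *   echo "2 4 8 16 32 64 64 64" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */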
1536 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1537 				size_t len, loff_t *offp, u16 wm[8])
1538 {
1539 	struct seq_file *m = file->private_data;
1540 	struct drm_i915_private *dev_priv = m->private;
1541 	struct drm_device *dev = &dev_priv->drm;
1542 	u16 new[8] = { 0 };
1543 	int num_levels;
1544 	int level;
1545 	int ret;
1546 	char tmp[32];
1547 
1548 	if (IS_CHERRYVIEW(dev_priv))
1549 		num_levels = 3;
1550 	else if (IS_VALLEYVIEW(dev_priv))
1551 		num_levels = 1;
1552 	else if (IS_G4X(dev_priv))
1553 		num_levels = 3;
1554 	else
1555 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1556 
1557 	if (len >= sizeof(tmp))
1558 		return -EINVAL;
1559 
1560 	if (copy_from_user(tmp, ubuf, len))
1561 		return -EFAULT;
1562 
1563 	tmp[len] = '\0';
1564 
1565 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1566 		     &new[0], &new[1], &new[2], &new[3],
1567 		     &new[4], &new[5], &new[6], &new[7]);
1568 	if (ret != num_levels)
1569 		return -EINVAL;
1570 
1571 	drm_modeset_lock_all(dev);
1572 
1573 	for (level = 0; level < num_levels; level++)
1574 		wm[level] = new[level];
1575 
1576 	drm_modeset_unlock_all(dev);
1577 
1578 	return len;
1579 }
1580 
1581 
1582 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1583 				    size_t len, loff_t *offp)
1584 {
1585 	struct seq_file *m = file->private_data;
1586 	struct drm_i915_private *dev_priv = m->private;
1587 	u16 *latencies;
1588 
1589 	if (INTEL_GEN(dev_priv) >= 9)
1590 		latencies = dev_priv->wm.skl_latency;
1591 	else
1592 		latencies = dev_priv->wm.pri_latency;
1593 
1594 	return wm_latency_write(file, ubuf, len, offp, latencies);
1595 }
1596 
1597 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1598 				    size_t len, loff_t *offp)
1599 {
1600 	struct seq_file *m = file->private_data;
1601 	struct drm_i915_private *dev_priv = m->private;
1602 	u16 *latencies;
1603 
1604 	if (INTEL_GEN(dev_priv) >= 9)
1605 		latencies = dev_priv->wm.skl_latency;
1606 	else
1607 		latencies = dev_priv->wm.spr_latency;
1608 
1609 	return wm_latency_write(file, ubuf, len, offp, latencies);
1610 }
1611 
1612 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1613 				    size_t len, loff_t *offp)
1614 {
1615 	struct seq_file *m = file->private_data;
1616 	struct drm_i915_private *dev_priv = m->private;
1617 	u16 *latencies;
1618 
1619 	if (INTEL_GEN(dev_priv) >= 9)
1620 		latencies = dev_priv->wm.skl_latency;
1621 	else
1622 		latencies = dev_priv->wm.cur_latency;
1623 
1624 	return wm_latency_write(file, ubuf, len, offp, latencies);
1625 }
1626 
1627 static const struct file_operations i915_pri_wm_latency_fops = {
1628 	.owner = THIS_MODULE,
1629 	.open = pri_wm_latency_open,
1630 	.read = seq_read,
1631 	.llseek = seq_lseek,
1632 	.release = single_release,
1633 	.write = pri_wm_latency_write
1634 };
1635 
1636 static const struct file_operations i915_spr_wm_latency_fops = {
1637 	.owner = THIS_MODULE,
1638 	.open = spr_wm_latency_open,
1639 	.read = seq_read,
1640 	.llseek = seq_lseek,
1641 	.release = single_release,
1642 	.write = spr_wm_latency_write
1643 };
1644 
1645 static const struct file_operations i915_cur_wm_latency_fops = {
1646 	.owner = THIS_MODULE,
1647 	.open = cur_wm_latency_open,
1648 	.read = seq_read,
1649 	.llseek = seq_lseek,
1650 	.release = single_release,
1651 	.write = cur_wm_latency_write
1652 };
1653 
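/*
 * i915_hpd_storm_ctl: reading reports the current HPD storm detection
 * threshold and whether a storm has been detected; writing a decimal value
 * sets a new threshold, 0 disables detection and "reset" restores
 * HPD_STORM_DEFAULT_THRESHOLD.
 *
 * Example (debugfs path assumed from the usual dri layout):
 *   echo 8 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */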
1654 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1655 {
1656 	struct drm_i915_private *dev_priv = m->private;
1657 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1658 
1659 	/* Synchronize with everything first in case there's been an HPD
1660 	 * storm, but we haven't finished handling it in the kernel yet
1661 	 */
1662 	intel_synchronize_irq(dev_priv);
1663 	flush_work(&dev_priv->hotplug.dig_port_work);
1664 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1665 
1666 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1667 	seq_printf(m, "Detected: %s\n",
1668 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1669 
1670 	return 0;
1671 }
1672 
1673 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1674 					const char __user *ubuf, size_t len,
1675 					loff_t *offp)
1676 {
1677 	struct seq_file *m = file->private_data;
1678 	struct drm_i915_private *dev_priv = m->private;
1679 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1680 	unsigned int new_threshold;
1681 	int i;
1682 	char *newline;
1683 	char tmp[16];
1684 
1685 	if (len >= sizeof(tmp))
1686 		return -EINVAL;
1687 
1688 	if (copy_from_user(tmp, ubuf, len))
1689 		return -EFAULT;
1690 
1691 	tmp[len] = '\0';
1692 
1693 	/* Strip newline, if any */
1694 	newline = strchr(tmp, '\n');
1695 	if (newline)
1696 		*newline = '\0';
1697 
1698 	if (strcmp(tmp, "reset") == 0)
1699 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1700 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1701 		return -EINVAL;
1702 
1703 	if (new_threshold > 0)
1704 		drm_dbg_kms(&dev_priv->drm,
1705 			    "Setting HPD storm detection threshold to %d\n",
1706 			    new_threshold);
1707 	else
1708 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1709 
1710 	spin_lock_irq(&dev_priv->irq_lock);
1711 	hotplug->hpd_storm_threshold = new_threshold;
1712 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1713 	for_each_hpd_pin(i)
1714 		hotplug->stats[i].count = 0;
1715 	spin_unlock_irq(&dev_priv->irq_lock);
1716 
1717 	/* Re-enable hpd immediately if we were in an irq storm */
1718 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1719 
1720 	return len;
1721 }
1722 
1723 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1724 {
1725 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1726 }
1727 
1728 static const struct file_operations i915_hpd_storm_ctl_fops = {
1729 	.owner = THIS_MODULE,
1730 	.open = i915_hpd_storm_ctl_open,
1731 	.read = seq_read,
1732 	.llseek = seq_lseek,
1733 	.release = single_release,
1734 	.write = i915_hpd_storm_ctl_write
1735 };
1736 
1737 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1738 {
1739 	struct drm_i915_private *dev_priv = m->private;
1740 
1741 	seq_printf(m, "Enabled: %s\n",
1742 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1743 
1744 	return 0;
1745 }
1746 
1747 static int
1748 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1749 {
1750 	return single_open(file, i915_hpd_short_storm_ctl_show,
1751 			   inode->i_private);
1752 }
1753 
1754 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1755 					      const char __user *ubuf,
1756 					      size_t len, loff_t *offp)
1757 {
1758 	struct seq_file *m = file->private_data;
1759 	struct drm_i915_private *dev_priv = m->private;
1760 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1761 	char *newline;
1762 	char tmp[16];
1763 	int i;
1764 	bool new_state;
1765 
1766 	if (len >= sizeof(tmp))
1767 		return -EINVAL;
1768 
1769 	if (copy_from_user(tmp, ubuf, len))
1770 		return -EFAULT;
1771 
1772 	tmp[len] = '\0';
1773 
1774 	/* Strip newline, if any */
1775 	newline = strchr(tmp, '\n');
1776 	if (newline)
1777 		*newline = '\0';
1778 
1779 	/* Reset to the "default" state for this system */
1780 	if (strcmp(tmp, "reset") == 0)
1781 		new_state = !HAS_DP_MST(dev_priv);
1782 	else if (kstrtobool(tmp, &new_state) != 0)
1783 		return -EINVAL;
1784 
1785 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1786 		    new_state ? "En" : "Dis");
1787 
1788 	spin_lock_irq(&dev_priv->irq_lock);
1789 	hotplug->hpd_short_storm_enabled = new_state;
1790 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1791 	for_each_hpd_pin(i)
1792 		hotplug->stats[i].count = 0;
1793 	spin_unlock_irq(&dev_priv->irq_lock);
1794 
1795 	/* Re-enable hpd immediately if we were in an irq storm */
1796 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1797 
1798 	return len;
1799 }
1800 
1801 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1802 	.owner = THIS_MODULE,
1803 	.open = i915_hpd_short_storm_ctl_open,
1804 	.read = seq_read,
1805 	.llseek = seq_lseek,
1806 	.release = single_release,
1807 	.write = i915_hpd_short_storm_ctl_write,
1808 };
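
/*
 * Illustrative usage of i915_hpd_short_storm_ctl (path assumes DRM
 * minor 0):
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 *   # echo reset > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 *
 * Any kstrtobool() value forces short HPD storm detection on or off;
 * "reset" restores the platform default, i.e. enabled only when the
 * platform lacks DP-MST support.
 */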
1809 
1810 static int i915_drrs_ctl_set(void *data, u64 val)
1811 {
1812 	struct drm_i915_private *dev_priv = data;
1813 	struct drm_device *dev = &dev_priv->drm;
1814 	struct intel_crtc *crtc;
1815 
1816 	if (INTEL_GEN(dev_priv) < 7)
1817 		return -ENODEV;
1818 
1819 	for_each_intel_crtc(dev, crtc) {
1820 		struct drm_connector_list_iter conn_iter;
1821 		struct intel_crtc_state *crtc_state;
1822 		struct drm_connector *connector;
1823 		struct drm_crtc_commit *commit;
1824 		int ret;
1825 
1826 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1827 		if (ret)
1828 			return ret;
1829 
1830 		crtc_state = to_intel_crtc_state(crtc->base.state);
1831 
1832 		if (!crtc_state->hw.active ||
1833 		    !crtc_state->has_drrs)
1834 			goto out;
1835 
1836 		commit = crtc_state->uapi.commit;
1837 		if (commit) {
1838 			ret = wait_for_completion_interruptible(&commit->hw_done);
1839 			if (ret)
1840 				goto out;
1841 		}
1842 
1843 		drm_connector_list_iter_begin(dev, &conn_iter);
1844 		drm_for_each_connector_iter(connector, &conn_iter) {
1845 			struct intel_encoder *encoder;
1846 			struct intel_dp *intel_dp;
1847 
1848 			if (!(crtc_state->uapi.connector_mask &
1849 			      drm_connector_mask(connector)))
1850 				continue;
1851 
1852 			encoder = intel_attached_encoder(to_intel_connector(connector));
1853 			if (encoder->type != INTEL_OUTPUT_EDP)
1854 				continue;
1855 
1856 			drm_dbg(&dev_priv->drm,
1857 				"Manually %sabling DRRS (val: %llu)\n",
1858 				val ? "en" : "dis", val);
1859 
1860 			intel_dp = enc_to_intel_dp(encoder);
1861 			if (val)
1862 				intel_edp_drrs_enable(intel_dp,
1863 						      crtc_state);
1864 			else
1865 				intel_edp_drrs_disable(intel_dp,
1866 						       crtc_state);
1867 		}
1868 		drm_connector_list_iter_end(&conn_iter);
1869 
1870 out:
1871 		drm_modeset_unlock(&crtc->base.mutex);
1872 		if (ret)
1873 			return ret;
1874 	}
1875 
1876 	return 0;
1877 }
1878 
1879 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
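
/*
 * Illustrative usage of i915_drrs_ctl (path assumes DRM minor 0): a
 * non-zero write enables DRRS on every active, DRRS-capable eDP pipe,
 * a zero write disables it:
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 *   # echo 0 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */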
1880 
1881 static ssize_t
1882 i915_fifo_underrun_reset_write(struct file *filp,
1883 			       const char __user *ubuf,
1884 			       size_t cnt, loff_t *ppos)
1885 {
1886 	struct drm_i915_private *dev_priv = filp->private_data;
1887 	struct intel_crtc *intel_crtc;
1888 	struct drm_device *dev = &dev_priv->drm;
1889 	int ret;
1890 	bool reset;
1891 
1892 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
1893 	if (ret)
1894 		return ret;
1895 
1896 	if (!reset)
1897 		return cnt;
1898 
1899 	for_each_intel_crtc(dev, intel_crtc) {
1900 		struct drm_crtc_commit *commit;
1901 		struct intel_crtc_state *crtc_state;
1902 
1903 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
1904 		if (ret)
1905 			return ret;
1906 
1907 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
1908 		commit = crtc_state->uapi.commit;
1909 		if (commit) {
1910 			ret = wait_for_completion_interruptible(&commit->hw_done);
1911 			if (!ret)
1912 				ret = wait_for_completion_interruptible(&commit->flip_done);
1913 		}
1914 
1915 		if (!ret && crtc_state->hw.active) {
1916 			drm_dbg_kms(&dev_priv->drm,
1917 				    "Re-arming FIFO underruns on pipe %c\n",
1918 				    pipe_name(intel_crtc->pipe));
1919 
1920 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
1921 		}
1922 
1923 		drm_modeset_unlock(&intel_crtc->base.mutex);
1924 
1925 		if (ret)
1926 			return ret;
1927 	}
1928 
1929 	ret = intel_fbc_reset_underrun(dev_priv);
1930 	if (ret)
1931 		return ret;
1932 
1933 	return cnt;
1934 }
1935 
1936 static const struct file_operations i915_fifo_underrun_reset_ops = {
1937 	.owner = THIS_MODULE,
1938 	.open = simple_open,
1939 	.write = i915_fifo_underrun_reset_write,
1940 	.llseek = default_llseek,
1941 };
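
/*
 * Illustrative usage of i915_fifo_underrun_reset (path assumes DRM
 * minor 0): writing a kstrtobool() "true" value re-arms FIFO underrun
 * reporting on all active pipes and resets FBC's underrun tracking via
 * intel_fbc_reset_underrun():
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */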
1942 
1943 static const struct drm_info_list intel_display_debugfs_list[] = {
1944 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
1945 	{"i915_fbc_status", i915_fbc_status, 0},
1946 	{"i915_ips_status", i915_ips_status, 0},
1947 	{"i915_sr_status", i915_sr_status, 0},
1948 	{"i915_opregion", i915_opregion, 0},
1949 	{"i915_vbt", i915_vbt, 0},
1950 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1951 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
1952 	{"i915_power_domain_info", i915_power_domain_info, 0},
1953 	{"i915_dmc_info", i915_dmc_info, 0},
1954 	{"i915_display_info", i915_display_info, 0},
1955 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
1956 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
1957 	{"i915_ddb_info", i915_ddb_info, 0},
1958 	{"i915_drrs_status", i915_drrs_status, 0},
1959 	{"i915_lpsp_status", i915_lpsp_status, 0},
1960 };
1961 
1962 static const struct {
1963 	const char *name;
1964 	const struct file_operations *fops;
1965 } intel_display_debugfs_files[] = {
1966 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
1967 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
1968 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
1969 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
1970 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
1971 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
1972 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
1973 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
1974 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
1975 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
1976 	{"i915_ipc_status", &i915_ipc_status_fops},
1977 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
1978 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
1979 };
1980 
1981 void intel_display_debugfs_register(struct drm_i915_private *i915)
1982 {
1983 	struct drm_minor *minor = i915->drm.primary;
1984 	int i;
1985 
1986 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
1987 		debugfs_create_file(intel_display_debugfs_files[i].name,
1988 				    0644,
1989 				    minor->debugfs_root,
1990 				    to_i915(minor->dev),
1991 				    intel_display_debugfs_files[i].fops);
1992 	}
1993 
1994 	drm_debugfs_create_files(intel_display_debugfs_list,
1995 				 ARRAY_SIZE(intel_display_debugfs_list),
1996 				 minor->debugfs_root, minor);
1997 }
1998 
1999 static int i915_panel_show(struct seq_file *m, void *data)
2000 {
2001 	struct drm_connector *connector = m->private;
2002 	struct intel_dp *intel_dp =
2003 		intel_attached_dp(to_intel_connector(connector));
2004 
2005 	if (connector->status != connector_status_connected)
2006 		return -ENODEV;
2007 
2008 	seq_printf(m, "Panel power up delay: %d\n",
2009 		   intel_dp->panel_power_up_delay);
2010 	seq_printf(m, "Panel power down delay: %d\n",
2011 		   intel_dp->panel_power_down_delay);
2012 	seq_printf(m, "Backlight on delay: %d\n",
2013 		   intel_dp->backlight_on_delay);
2014 	seq_printf(m, "Backlight off delay: %d\n",
2015 		   intel_dp->backlight_off_delay);
2016 
2017 	return 0;
2018 }
2019 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2020 
2021 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2022 {
2023 	struct drm_connector *connector = m->private;
2024 	struct intel_connector *intel_connector = to_intel_connector(connector);
2025 
2026 	if (connector->status != connector_status_connected)
2027 		return -ENODEV;
2028 
2029 	/* HDCP is supported by the connector only if a shim is registered */
2030 	if (!intel_connector->hdcp.shim)
2031 		return -EINVAL;
2032 
2033 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2034 		   connector->base.id);
2035 	intel_hdcp_info(m, intel_connector);
2036 
2037 	return 0;
2038 }
2039 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
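
/*
 * Illustrative read of the per-connector HDCP capability file; connector
 * debugfs directories sit under the device's debugfs root, e.g. assuming
 * DRM minor 0 and a connector named DP-1:
 *
 *   # cat /sys/kernel/debug/dri/0/DP-1/i915_hdcp_sink_capability
 */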
2040 
2041 #define LPSP_CAPABLE(COND) ((COND) ? seq_puts(m, "LPSP: capable\n") : \
2042 				seq_puts(m, "LPSP: incapable\n"))
2043 
2044 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2045 {
2046 	struct drm_connector *connector = m->private;
2047 	struct intel_encoder *encoder =
2048 			intel_attached_encoder(to_intel_connector(connector));
2049 	struct drm_i915_private *i915 = to_i915(connector->dev);
2050 
2051 	if (connector->status != connector_status_connected)
2052 		return -ENODEV;
2053 
2054 	switch (INTEL_GEN(i915)) {
2055 	case 12:
2056 		/*
2057 		 * TGL can actually drive LPSP on ports up to DDI_C, but no
2058 		 * TGL SKU has a physically connected DDI_C, and the driver
2059 		 * does not initialize the DDI_C port on gen12.
2060 		 */
2061 		LPSP_CAPABLE(encoder->port <= PORT_B);
2062 		break;
2063 	case 11:
2064 		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2065 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2066 		break;
2067 	case 10:
2068 	case 9:
2069 		LPSP_CAPABLE(encoder->port == PORT_A &&
2070 			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2071 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
2072 			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2073 		break;
2074 	default:
2075 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2076 			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2077 	}
2078 
2079 	return 0;
2080 }
2081 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2082 
2083 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2084 {
2085 	struct drm_connector *connector = m->private;
2086 	struct drm_device *dev = connector->dev;
2087 	struct drm_crtc *crtc;
2088 	struct intel_dp *intel_dp;
2089 	struct drm_modeset_acquire_ctx ctx;
2090 	struct intel_crtc_state *crtc_state = NULL;
2091 	int ret = 0;
2092 	bool try_again = false;
2093 
2094 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2095 
2096 	do {
2097 		try_again = false;
2098 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2099 				       &ctx);
2100 		if (ret) {
2101 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2102 				try_again = true;
2103 				continue;
2104 			}
2105 			break;
2106 		}
2107 		crtc = connector->state->crtc;
2108 		if (connector->status != connector_status_connected || !crtc) {
2109 			ret = -ENODEV;
2110 			break;
2111 		}
2112 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2113 		if (ret == -EDEADLK) {
2114 			ret = drm_modeset_backoff(&ctx);
2115 			if (!ret) {
2116 				try_again = true;
2117 				continue;
2118 			}
2119 			break;
2120 		} else if (ret) {
2121 			break;
2122 		}
2123 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2124 		crtc_state = to_intel_crtc_state(crtc->state);
2125 		seq_printf(m, "DSC_Enabled: %s\n",
2126 			   yesno(crtc_state->dsc.compression_enable));
2127 		seq_printf(m, "DSC_Sink_Support: %s\n",
2128 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2129 		seq_printf(m, "Force_DSC_Enable: %s\n",
2130 			   yesno(intel_dp->force_dsc_en));
2131 		if (!intel_dp_is_edp(intel_dp))
2132 			seq_printf(m, "FEC_Sink_Support: %s\n",
2133 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2134 	} while (try_again);
2135 
2136 	drm_modeset_drop_locks(&ctx);
2137 	drm_modeset_acquire_fini(&ctx);
2138 
2139 	return ret;
2140 }
2141 
2142 static ssize_t i915_dsc_fec_support_write(struct file *file,
2143 					  const char __user *ubuf,
2144 					  size_t len, loff_t *offp)
2145 {
2146 	bool dsc_enable = false;
2147 	int ret;
2148 	struct drm_connector *connector =
2149 		((struct seq_file *)file->private_data)->private;
2150 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2151 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2152 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2153 
2154 	if (len == 0)
2155 		return 0;
2156 
2157 	drm_dbg(&i915->drm,
2158 		"Processing %zu bytes from user to force DSC\n", len);
2159 
2160 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2161 	if (ret < 0)
2162 		return ret;
2163 
2164 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2165 		(dsc_enable) ? "true" : "false");
2166 	intel_dp->force_dsc_en = dsc_enable;
2167 
2168 	*offp += len;
2169 	return len;
2170 }
2171 
2172 static int i915_dsc_fec_support_open(struct inode *inode,
2173 				     struct file *file)
2174 {
2175 	return single_open(file, i915_dsc_fec_support_show,
2176 			   inode->i_private);
2177 }
2178 
2179 static const struct file_operations i915_dsc_fec_support_fops = {
2180 	.owner = THIS_MODULE,
2181 	.open = i915_dsc_fec_support_open,
2182 	.read = seq_read,
2183 	.llseek = seq_lseek,
2184 	.release = single_release,
2185 	.write = i915_dsc_fec_support_write,
2186 };
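
/*
 * Illustrative usage of the per-connector i915_dsc_fec_support file (path
 * assumes DRM minor 0 and a connector named DP-1): reading reports DSC/FEC
 * enablement and sink support, writing a kstrtobool() value sets
 * intel_dp->force_dsc_en:
 *
 *   # cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *   # echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */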
2187 
2188 /**
2189  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2190  * @connector: pointer to a registered drm_connector
2191  *
2192  * Cleanup will be done by drm_connector_unregister() through a call to
2193  * drm_debugfs_connector_remove().
2194  *
2195  * Returns 0 on success, negative error codes on error.
2196  */
2197 int intel_connector_debugfs_add(struct drm_connector *connector)
2198 {
2199 	struct dentry *root = connector->debugfs_entry;
2200 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2201 
2202 	/* The connector must have been registered beforehand. */
2203 	if (!root)
2204 		return -ENODEV;
2205 
2206 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2207 		debugfs_create_file("i915_panel_timings", 0444, root,
2208 				    connector, &i915_panel_fops);
2209 		debugfs_create_file("i915_psr_sink_status", 0444, root,
2210 				    connector, &i915_psr_sink_status_fops);
2211 	}
2212 
2213 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2214 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2215 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2216 		debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
2217 				    connector, &i915_hdcp_sink_capability_fops);
2218 	}
2219 
2220 	if (INTEL_GEN(dev_priv) >= 10 &&
2221 	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2222 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2223 		debugfs_create_file("i915_dsc_fec_support", 0444, root,
2224 				    connector, &i915_dsc_fec_support_fops);
2225 
2226 	/* Legacy panels don't support LPSP on any platform */
2227 	if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2228 	     IS_BROADWELL(dev_priv)) &&
2229 	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2230 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2231 	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2232 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2233 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2234 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2235 				    connector, &i915_lpsp_capability_fops);
2236 
2237 	return 0;
2238 }
2239