xref: /linux/drivers/gpu/drm/i915/display/intel_display_debugfs.c (revision a1c3be890440a1769ed6f822376a3e3ab0d42994)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dp.h"
15 #include "intel_fbc.h"
16 #include "intel_hdcp.h"
17 #include "intel_hdmi.h"
18 #include "intel_pm.h"
19 #include "intel_psr.h"
20 #include "intel_sideband.h"
21 #include "intel_sprite.h"
22 
23 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
24 {
25 	return to_i915(node->minor->dev);
26 }
27 
28 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
29 {
30 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
31 
32 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
33 		   dev_priv->fb_tracking.busy_bits);
34 
35 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
36 		   dev_priv->fb_tracking.flip_bits);
37 
38 	return 0;
39 }
40 
41 static int i915_fbc_status(struct seq_file *m, void *unused)
42 {
43 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
44 	struct intel_fbc *fbc = &dev_priv->fbc;
45 	intel_wakeref_t wakeref;
46 
47 	if (!HAS_FBC(dev_priv))
48 		return -ENODEV;
49 
50 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
51 	mutex_lock(&fbc->lock);
52 
53 	if (intel_fbc_is_active(dev_priv))
54 		seq_puts(m, "FBC enabled\n");
55 	else
56 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
57 
58 	if (intel_fbc_is_active(dev_priv)) {
59 		u32 mask;
60 
61 		if (INTEL_GEN(dev_priv) >= 8)
62 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
63 		else if (INTEL_GEN(dev_priv) >= 7)
64 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
65 		else if (INTEL_GEN(dev_priv) >= 5)
66 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
67 		else if (IS_G4X(dev_priv))
68 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
69 		else
70 			mask = intel_de_read(dev_priv, FBC_STATUS) &
71 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
72 
73 		seq_printf(m, "Compressing: %s\n", yesno(mask));
74 	}
75 
76 	mutex_unlock(&fbc->lock);
77 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
78 
79 	return 0;
80 }
81 
82 static int i915_fbc_false_color_get(void *data, u64 *val)
83 {
84 	struct drm_i915_private *dev_priv = data;
85 
86 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
87 		return -ENODEV;
88 
89 	*val = dev_priv->fbc.false_color;
90 
91 	return 0;
92 }
93 
94 static int i915_fbc_false_color_set(void *data, u64 val)
95 {
96 	struct drm_i915_private *dev_priv = data;
97 	u32 reg;
98 
99 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
100 		return -ENODEV;
101 
102 	mutex_lock(&dev_priv->fbc.lock);
103 
104 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
105 	dev_priv->fbc.false_color = val;
106 
107 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
108 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
109 
110 	mutex_unlock(&dev_priv->fbc.lock);
111 	return 0;
112 }
113 
/* debugfs file wiring the FBC false-color get/set pair above together. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
117 
118 static int i915_ips_status(struct seq_file *m, void *unused)
119 {
120 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
121 	intel_wakeref_t wakeref;
122 
123 	if (!HAS_IPS(dev_priv))
124 		return -ENODEV;
125 
126 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
127 
128 	seq_printf(m, "Enabled by kernel parameter: %s\n",
129 		   yesno(dev_priv->params.enable_ips));
130 
131 	if (INTEL_GEN(dev_priv) >= 8) {
132 		seq_puts(m, "Currently: unknown\n");
133 	} else {
134 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
135 			seq_puts(m, "Currently: enabled\n");
136 		else
137 			seq_puts(m, "Currently: disabled\n");
138 	}
139 
140 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
141 
142 	return 0;
143 }
144 
/*
 * i915_sr_status - report whether panel self-refresh (SR) is enabled.
 *
 * The SR enable bit lives in a different register depending on the
 * platform, hence the cascade below; the order of the checks matters.
 * On gen9+ there is no single global SR bit, so nothing is read and
 * "disabled" is reported.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
173 
174 static int i915_opregion(struct seq_file *m, void *unused)
175 {
176 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
177 
178 	if (opregion->header)
179 		seq_write(m, opregion->header, OPREGION_SIZE);
180 
181 	return 0;
182 }
183 
184 static int i915_vbt(struct seq_file *m, void *unused)
185 {
186 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
187 
188 	if (opregion->vbt)
189 		seq_write(m, opregion->vbt, opregion->vbt_size);
190 
191 	return 0;
192 }
193 
/*
 * i915_gem_framebuffer_info - list all framebuffers and their backing
 * objects.
 *
 * The fbdev framebuffer (when fbdev emulation is compiled in) is printed
 * first and then skipped in the generic walk so it only appears once.
 * The mode_config.fb_lock protects the device's framebuffer list while
 * iterating.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Already printed above as "fbcon". */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
237 
238 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
239 {
240 	u8 val;
241 	static const char * const sink_status[] = {
242 		"inactive",
243 		"transition to active, capture and display",
244 		"active, display from RFB",
245 		"active, capture and display on sink device timings",
246 		"transition to inactive, capture and display, timing re-sync",
247 		"reserved",
248 		"reserved",
249 		"sink internal error",
250 	};
251 	struct drm_connector *connector = m->private;
252 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
253 	struct intel_dp *intel_dp =
254 		intel_attached_dp(to_intel_connector(connector));
255 	int ret;
256 
257 	if (!CAN_PSR(dev_priv)) {
258 		seq_puts(m, "PSR Unsupported\n");
259 		return -ENODEV;
260 	}
261 
262 	if (connector->status != connector_status_connected)
263 		return -ENODEV;
264 
265 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
266 
267 	if (ret == 1) {
268 		const char *str = "unknown";
269 
270 		val &= DP_PSR_SINK_STATE_MASK;
271 		if (val < ARRAY_SIZE(sink_status))
272 			str = sink_status[val];
273 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
274 	} else {
275 		return ret;
276 	}
277 
278 	return 0;
279 }
280 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
281 
/*
 * psr_source_status - print the source-side PSR hardware state machine.
 *
 * PSR1 and PSR2 have different status registers and different state
 * encodings, so each branch carries its own lookup table. The raw
 * register value is printed alongside the decoded name.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		/* PSR2 state names, indexed by EDP_PSR2_STATUS_STATE. */
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(dev_priv,
				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		/* PSR1 state names, indexed by EDP_PSR_STATUS_STATE. */
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(dev_priv,
				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
329 
/*
 * i915_edp_psr_status - dump eDP PSR (Panel Self Refresh) state.
 *
 * Reports sink capability, the active PSR mode (PSR1/PSR2), the raw
 * control/status registers, frontbuffer busy bits and, where the
 * hardware provides them, performance and SU-block counters. Takes a
 * runtime-PM wakeref and psr->lock while touching hardware.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	/* Without sink support there is nothing more to report. */
	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   yesno(psr->sink_not_reliable));

		goto unlock;
	}

	/* PSR1 and PSR2 use different control registers. */
	if (psr->psr2_enabled) {
		val = intel_de_read(dev_priv,
				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(dev_priv->psr.transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = intel_de_read(dev_priv,
				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
		val &= EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
			val = intel_de_read(dev_priv,
					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
			su_frames_val[frame / 3] = val;
		}

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		/* Each status register packs several frames' SU counts. */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   enableddisabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
432 
433 static int
434 i915_edp_psr_debug_set(void *data, u64 val)
435 {
436 	struct drm_i915_private *dev_priv = data;
437 	intel_wakeref_t wakeref;
438 	int ret;
439 
440 	if (!CAN_PSR(dev_priv))
441 		return -ENODEV;
442 
443 	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
444 
445 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
446 
447 	ret = intel_psr_debug_set(dev_priv, val);
448 
449 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
450 
451 	return ret;
452 }
453 
454 static int
455 i915_edp_psr_debug_get(void *data, u64 *val)
456 {
457 	struct drm_i915_private *dev_priv = data;
458 
459 	if (!CAN_PSR(dev_priv))
460 		return -ENODEV;
461 
462 	*val = READ_ONCE(dev_priv->psr.debug);
463 	return 0;
464 }
465 
/* debugfs file wiring the PSR debug get/set pair above together. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
469 
470 static int i915_power_domain_info(struct seq_file *m, void *unused)
471 {
472 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
473 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
474 	int i;
475 
476 	mutex_lock(&power_domains->lock);
477 
478 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
479 	for (i = 0; i < power_domains->power_well_count; i++) {
480 		struct i915_power_well *power_well;
481 		enum intel_display_power_domain power_domain;
482 
483 		power_well = &power_domains->power_wells[i];
484 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
485 			   power_well->count);
486 
487 		for_each_power_domain(power_domain, power_well->desc->domains)
488 			seq_printf(m, "  %-23s %d\n",
489 				 intel_display_power_domain_str(power_domain),
490 				 power_domains->domain_use_count[power_domain]);
491 	}
492 
493 	mutex_unlock(&power_domains->lock);
494 
495 	return 0;
496 }
497 
/*
 * i915_dmc_info - dump DMC (CSR) firmware state and DC-state counters.
 *
 * Which DC5/DC6 counter registers exist depends on the platform; dc6_reg
 * stays zero-initialized when the platform has no DC6 counter, and its
 * printout is skipped in that case.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;
	i915_reg_t dc5_reg, dc6_reg = {};

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without loaded firmware only the raw registers below are useful. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (INTEL_GEN(dev_priv) >= 12) {
		if (IS_DGFX(dev_priv)) {
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		/*
		 * NOTE: DMC_DEBUG3 is a general purpose reg.
		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
		 * reg for DC3CO debugging and validation,
		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
		 */
		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(dev_priv, DMC_DEBUG3));
	} else {
		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						 SKL_CSR_DC3_DC5_COUNT;
		if (!IS_GEN9_LP(dev_priv))
			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   intel_de_read(dev_priv, dc5_reg));
	/* dc6_reg.reg == 0 means this platform has no DC6 counter. */
	if (dc6_reg.reg)
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(dev_priv, dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(dev_priv, CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
561 
562 static void intel_seq_print_mode(struct seq_file *m, int tabs,
563 				 const struct drm_display_mode *mode)
564 {
565 	int i;
566 
567 	for (i = 0; i < tabs; i++)
568 		seq_putc(m, '\t');
569 
570 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
571 }
572 
573 static void intel_encoder_info(struct seq_file *m,
574 			       struct intel_crtc *crtc,
575 			       struct intel_encoder *encoder)
576 {
577 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
578 	struct drm_connector_list_iter conn_iter;
579 	struct drm_connector *connector;
580 
581 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
582 		   encoder->base.base.id, encoder->base.name);
583 
584 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
585 	drm_for_each_connector_iter(connector, &conn_iter) {
586 		const struct drm_connector_state *conn_state =
587 			connector->state;
588 
589 		if (conn_state->best_encoder != &encoder->base)
590 			continue;
591 
592 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
593 			   connector->base.id, connector->name);
594 	}
595 	drm_connector_list_iter_end(&conn_iter);
596 }
597 
598 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
599 {
600 	const struct drm_display_mode *mode = panel->fixed_mode;
601 
602 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
603 }
604 
605 static void intel_hdcp_info(struct seq_file *m,
606 			    struct intel_connector *intel_connector)
607 {
608 	bool hdcp_cap, hdcp2_cap;
609 
610 	if (!intel_connector->hdcp.shim) {
611 		seq_puts(m, "No Connector Support");
612 		goto out;
613 	}
614 
615 	hdcp_cap = intel_hdcp_capable(intel_connector);
616 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
617 
618 	if (hdcp_cap)
619 		seq_puts(m, "HDCP1.4 ");
620 	if (hdcp2_cap)
621 		seq_puts(m, "HDCP2.2 ");
622 
623 	if (!hdcp_cap && !hdcp2_cap)
624 		seq_puts(m, "None");
625 
626 out:
627 	seq_puts(m, "\n");
628 }
629 
630 static void intel_dp_info(struct seq_file *m,
631 			  struct intel_connector *intel_connector)
632 {
633 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
634 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
635 	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
636 
637 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
638 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
639 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
640 		intel_panel_info(m, &intel_connector->panel);
641 
642 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
643 				edid ? edid->data : NULL, &intel_dp->aux);
644 }
645 
646 static void intel_dp_mst_info(struct seq_file *m,
647 			      struct intel_connector *intel_connector)
648 {
649 	bool has_audio = intel_connector->port->has_audio;
650 
651 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
652 }
653 
654 static void intel_hdmi_info(struct seq_file *m,
655 			    struct intel_connector *intel_connector)
656 {
657 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
658 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
659 
660 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
661 }
662 
663 static void intel_lvds_info(struct seq_file *m,
664 			    struct intel_connector *intel_connector)
665 {
666 	intel_panel_info(m, &intel_connector->panel);
667 }
668 
/*
 * intel_connector_info - dump one connector's status, sink details and
 * mode list.
 *
 * Disconnected connectors only get the status line. Type-specific
 * details (DP/HDMI/LVDS) are printed only when an encoder is bound.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	const struct drm_connector_state *conn_state = connector->state;
	struct intel_encoder *encoder =
		to_intel_encoder(conn_state->best_encoder);
	const struct drm_display_mode *mode;

	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* No bound encoder: nothing type-specific to report. */
	if (!encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		/* HDMI can sit behind either a native HDMI or a DDI encoder. */
		if (encoder->type == INTEL_OUTPUT_HDMI ||
		    encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_puts(m, "\tHDCP version: ");
	intel_hdcp_info(m, intel_connector);

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
723 
/* Short human-readable tag for a DRM plane type. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
741 
/* Format a rotation bitmask as human-readable flags plus the raw value. */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
758 
759 static const char *plane_visibility(const struct intel_plane_state *plane_state)
760 {
761 	if (plane_state->uapi.visible)
762 		return "visible";
763 
764 	if (plane_state->planar_slave)
765 		return "planar-slave";
766 
767 	return "hidden";
768 }
769 
/*
 * intel_plane_uapi_info - print the plane's uapi (userspace-requested)
 * state: framebuffer, visibility, src/dst rectangles, rotation, and any
 * planar (YUV two-plane) linkage.
 */
static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->uapi.fb;
	struct drm_rect src, dst;
	char rot_str[48];

	src = drm_plane_state_src(&plane_state->uapi);
	dst = drm_plane_state_dest(&plane_state->uapi);

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->uapi.rotation);

	seq_puts(m, "\t\tuapi: [FB:");
	if (fb)
		seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
			   &fb->format->format, fb->modifier, fb->width,
			   fb->height);
	else
		seq_puts(m, "0] n/a,0x0,0x0,");
	seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
		   ", rotation=%s\n", plane_visibility(plane_state),
		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);

	if (plane_state->planar_linked_plane)
		seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
			   plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
			   plane_state->planar_slave ? "slave" : "master");
}
800 
/*
 * intel_plane_hw_info - print the plane's hw (committed) state.
 *
 * Skipped entirely when no hw framebuffer is attached.
 *
 * NOTE(review): visible/src/dst below come from plane_state->uapi, not
 * plane_state->hw — presumably intentional since hw may not carry its
 * own rects here, but worth confirming against intel_plane_state.
 */
static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	char rot_str[48];

	if (!fb)
		return;

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->hw.rotation);

	seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
		   DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
		   fb->base.id, &fb->format->format,
		   fb->modifier, fb->width, fb->height,
		   yesno(plane_state->uapi.visible),
		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
		   DRM_RECT_ARG(&plane_state->uapi.dst),
		   rot_str);
}
823 
824 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
825 {
826 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
827 	struct intel_plane *plane;
828 
829 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
830 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
831 			   plane->base.base.id, plane->base.name,
832 			   plane_type(plane->base.type));
833 		intel_plane_uapi_info(m, plane);
834 		intel_plane_hw_info(m, plane);
835 	}
836 }
837 
838 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
839 {
840 	const struct intel_crtc_state *crtc_state =
841 		to_intel_crtc_state(crtc->base.state);
842 	int num_scalers = crtc->num_scalers;
843 	int i;
844 
845 	/* Not all platformas have a scaler */
846 	if (num_scalers) {
847 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
848 			   num_scalers,
849 			   crtc_state->scaler_state.scaler_users,
850 			   crtc_state->scaler_state.scaler_id);
851 
852 		for (i = 0; i < num_scalers; i++) {
853 			const struct intel_scaler *sc =
854 				&crtc_state->scaler_state.scalers[i];
855 
856 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
857 				   i, yesno(sc->in_use), sc->mode);
858 		}
859 		seq_puts(m, "\n");
860 	} else {
861 		seq_puts(m, "\tNo scalers available on this platform\n");
862 	}
863 }
864 
865 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
/*
 * crtc_updates_info - print a histogram of vblank-evade update times.
 *
 * crtc->debug.vbl.times[] buckets update durations by power of two;
 * each row renders its count as a log2-scaled bar of '*' characters,
 * with a time label ("us"/"ms") on every other row. Summary min/max/
 * average and overrun statistics follow.
 */
static void crtc_updates_info(struct seq_file *m,
			      struct intel_crtc *crtc,
			      const char *hdr)
{
	u64 count;
	int row;

	count = 0;
	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
		count += crtc->debug.vbl.times[row];
	seq_printf(m, "%sUpdates: %llu\n", hdr, count);
	if (!count)
		return;

	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
		char columns[80] = "       |";
		unsigned int x;

		if (row & 1) {
			const char *units;

			/* Rows above ~10 are in the millisecond range. */
			if (row > 10) {
				x = 1000000;
				units = "ms";
			} else {
				x = 1000;
				units = "us";
			}

			snprintf(columns, sizeof(columns), "%4ld%s |",
				 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
		}

		if (crtc->debug.vbl.times[row]) {
			/* Bar length is log2 of the bucket's count. */
			x = ilog2(crtc->debug.vbl.times[row]);
			memset(columns + 8, '*', x);
			columns[8 + x] = '\0';
		}

		seq_printf(m, "%s%s\n", hdr, columns);
	}

	seq_printf(m, "%sMin update: %lluns\n",
		   hdr, crtc->debug.vbl.min);
	seq_printf(m, "%sMax update: %lluns\n",
		   hdr, crtc->debug.vbl.max);
	seq_printf(m, "%sAverage update: %lluns\n",
		   hdr, div64_u64(crtc->debug.vbl.sum,  count));
	seq_printf(m, "%sOverruns > %uus: %u\n",
		   hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
}
917 
/* seq_file show callback: dump this CRTC's update histogram, no header. */
static int crtc_updates_show(struct seq_file *m, void *data)
{
	crtc_updates_info(m, m->private, "");
	return 0;
}
923 
/* Open callback: bind the seq_file to the CRTC stashed in i_private. */
static int crtc_updates_open(struct inode *inode, struct file *file)
{
	return single_open(file, crtc_updates_show, inode->i_private);
}
928 
929 static ssize_t crtc_updates_write(struct file *file,
930 				  const char __user *ubuf,
931 				  size_t len, loff_t *offp)
932 {
933 	struct seq_file *m = file->private_data;
934 	struct intel_crtc *crtc = m->private;
935 
936 	/* May race with an update. Meh. */
937 	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
938 
939 	return len;
940 }
941 
/* File operations for the per-CRTC "i915_update_info" debugfs entry. */
static const struct file_operations crtc_updates_fops = {
	.owner = THIS_MODULE,
	.open = crtc_updates_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = crtc_updates_write
};
950 
/* Register the "i915_update_info" file under the CRTC's debugfs dir. */
static void crtc_updates_add(struct drm_crtc *crtc)
{
	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
			    to_intel_crtc(crtc), &crtc_updates_fops);
}
956 
957 #else
/* No-op stub when CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is disabled. */
static void crtc_updates_info(struct seq_file *m,
			      struct intel_crtc *crtc,
			      const char *hdr)
{
}
963 
/* No-op stub when CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is disabled. */
static void crtc_updates_add(struct drm_crtc *crtc)
{
}
967 #endif
968 
/*
 * intel_crtc_info - dump one CRTC's uapi and hw state, its bigjoiner
 * link, attached encoders and planes, underrun reporting, and (when
 * compiled in) its update-time histogram.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_encoder *encoder;

	seq_printf(m, "[CRTC:%d:%s]:\n",
		   crtc->base.base.id, crtc->base.name);

	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
		   yesno(crtc_state->uapi.enable),
		   yesno(crtc_state->uapi.active),
		   DRM_MODE_ARG(&crtc_state->uapi.mode));

	/* hw details are only meaningful when the pipe is enabled. */
	if (crtc_state->hw.enable) {
		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
			   yesno(crtc_state->hw.active),
			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));

		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
			   yesno(crtc_state->dither), crtc_state->pipe_bpp);

		intel_scaler_info(m, crtc);
	}

	if (crtc_state->bigjoiner)
		seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
			   crtc_state->bigjoiner_linked_crtc->base.base.id,
			   crtc_state->bigjoiner_linked_crtc->base.name,
			   crtc_state->bigjoiner_slave ? "slave" : "master");

	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
				    crtc_state->uapi.encoder_mask)
		intel_encoder_info(m, crtc, encoder);

	intel_plane_info(m, crtc);

	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
		   yesno(!crtc->cpu_fifo_underrun_disabled),
		   yesno(!crtc->pch_fifo_underrun_disabled));

	crtc_updates_info(m, crtc, "\t");
}
1014 
1015 static int i915_display_info(struct seq_file *m, void *unused)
1016 {
1017 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1018 	struct drm_device *dev = &dev_priv->drm;
1019 	struct intel_crtc *crtc;
1020 	struct drm_connector *connector;
1021 	struct drm_connector_list_iter conn_iter;
1022 	intel_wakeref_t wakeref;
1023 
1024 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1025 
1026 	drm_modeset_lock_all(dev);
1027 
1028 	seq_printf(m, "CRTC info\n");
1029 	seq_printf(m, "---------\n");
1030 	for_each_intel_crtc(dev, crtc)
1031 		intel_crtc_info(m, crtc);
1032 
1033 	seq_printf(m, "\n");
1034 	seq_printf(m, "Connector info\n");
1035 	seq_printf(m, "--------------\n");
1036 	drm_connector_list_iter_begin(dev, &conn_iter);
1037 	drm_for_each_connector_iter(connector, &conn_iter)
1038 		intel_connector_info(m, connector);
1039 	drm_connector_list_iter_end(&conn_iter);
1040 
1041 	drm_modeset_unlock_all(dev);
1042 
1043 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1044 
1045 	return 0;
1046 }
1047 
1048 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
1049 {
1050 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1051 	struct drm_device *dev = &dev_priv->drm;
1052 	int i;
1053 
1054 	drm_modeset_lock_all(dev);
1055 
1056 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
1057 		   dev_priv->dpll.ref_clks.nssc,
1058 		   dev_priv->dpll.ref_clks.ssc);
1059 
1060 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
1061 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
1062 
1063 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
1064 			   pll->info->id);
1065 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
1066 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
1067 		seq_printf(m, " tracked hardware state:\n");
1068 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
1069 		seq_printf(m, " dpll_md: 0x%08x\n",
1070 			   pll->state.hw_state.dpll_md);
1071 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
1072 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
1073 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
1074 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
1075 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
1076 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
1077 			   pll->state.hw_state.mg_refclkin_ctl);
1078 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
1079 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
1080 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
1081 			   pll->state.hw_state.mg_clktop2_hsclkctl);
1082 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
1083 			   pll->state.hw_state.mg_pll_div0);
1084 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
1085 			   pll->state.hw_state.mg_pll_div1);
1086 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
1087 			   pll->state.hw_state.mg_pll_lf);
1088 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
1089 			   pll->state.hw_state.mg_pll_frac_lock);
1090 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
1091 			   pll->state.hw_state.mg_pll_ssc);
1092 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
1093 			   pll->state.hw_state.mg_pll_bias);
1094 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
1095 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
1096 	}
1097 	drm_modeset_unlock_all(dev);
1098 
1099 	return 0;
1100 }
1101 
1102 static int i915_ipc_status_show(struct seq_file *m, void *data)
1103 {
1104 	struct drm_i915_private *dev_priv = m->private;
1105 
1106 	seq_printf(m, "Isochronous Priority Control: %s\n",
1107 			yesno(dev_priv->ipc_enabled));
1108 	return 0;
1109 }
1110 
1111 static int i915_ipc_status_open(struct inode *inode, struct file *file)
1112 {
1113 	struct drm_i915_private *dev_priv = inode->i_private;
1114 
1115 	if (!HAS_IPC(dev_priv))
1116 		return -ENODEV;
1117 
1118 	return single_open(file, i915_ipc_status_show, dev_priv);
1119 }
1120 
1121 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
1122 				     size_t len, loff_t *offp)
1123 {
1124 	struct seq_file *m = file->private_data;
1125 	struct drm_i915_private *dev_priv = m->private;
1126 	intel_wakeref_t wakeref;
1127 	bool enable;
1128 	int ret;
1129 
1130 	ret = kstrtobool_from_user(ubuf, len, &enable);
1131 	if (ret < 0)
1132 		return ret;
1133 
1134 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1135 		if (!dev_priv->ipc_enabled && enable)
1136 			drm_info(&dev_priv->drm,
1137 				 "Enabling IPC: WM will be proper only after next commit\n");
1138 		dev_priv->ipc_enabled = enable;
1139 		intel_enable_ipc(dev_priv);
1140 	}
1141 
1142 	return len;
1143 }
1144 
1145 static const struct file_operations i915_ipc_status_fops = {
1146 	.owner = THIS_MODULE,
1147 	.open = i915_ipc_status_open,
1148 	.read = seq_read,
1149 	.llseek = seq_lseek,
1150 	.release = single_release,
1151 	.write = i915_ipc_status_write
1152 };
1153 
/*
 * debugfs: dump the per-plane DDB (display data buffer) allocation
 * (start, end, size) for every pipe. Only meaningful on gen9+.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	/* DDB allocation tracking only exists on skl+ */
	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		/* the cursor plane is reported separately, by name */
		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
1192 
/*
 * Print the DRRS (Dynamic Refresh Rate Switching) status for one CRTC:
 * per-connector support, whether DRRS is active, the current refresh
 * rate state (high/low) and the busy frontbuffer bits.
 *
 * Takes and releases drrs->mutex around accesses to the drrs state.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		bool supported = false;

		/* only report connectors driven by this crtc */
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);

		/* seamless DRRS is only supported on eDP panels */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
		    drrs->type == SEAMLESS_DRRS_SUPPORT)
			supported = true;

		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
	}
	drm_connector_list_iter_end(&conn_iter);

	seq_puts(m, "\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Enabled: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Enabled : No");
	}
	seq_puts(m, "\n");
}
1266 
1267 static int i915_drrs_status(struct seq_file *m, void *unused)
1268 {
1269 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1270 	struct drm_device *dev = &dev_priv->drm;
1271 	struct intel_crtc *intel_crtc;
1272 	int active_crtc_cnt = 0;
1273 
1274 	drm_modeset_lock_all(dev);
1275 	for_each_intel_crtc(dev, intel_crtc) {
1276 		if (intel_crtc->base.state->active) {
1277 			active_crtc_cnt++;
1278 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1279 
1280 			drrs_status_per_crtc(m, dev, intel_crtc);
1281 		}
1282 	}
1283 	drm_modeset_unlock_all(dev);
1284 
1285 	if (!active_crtc_cnt)
1286 		seq_puts(m, "No active crtc found\n");
1287 
1288 	return 0;
1289 }
1290 
/*
 * Print whether LPSP (low power single pipe) is enabled. Expects a
 * seq_file 'm' in the expansion context. Fix: parenthesize COND so
 * compound expressions expand safely inside the ternary.
 */
#define LPSP_STATUS(COND) ((COND) ? seq_puts(m, "LPSP: enabled\n") : \
				seq_puts(m, "LPSP: disabled\n"))
1293 
1294 static bool
1295 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1296 			      enum i915_power_well_id power_well_id)
1297 {
1298 	intel_wakeref_t wakeref;
1299 	bool is_enabled;
1300 
1301 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1302 	is_enabled = intel_display_power_well_is_enabled(i915,
1303 							 power_well_id);
1304 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1305 
1306 	return is_enabled;
1307 }
1308 
/*
 * debugfs: report the LPSP (low power single pipe) state, inferred
 * from whether the platform's wider display power well is disabled.
 */
static int i915_lpsp_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	/* LPSP is considered enabled when the big power well is off */
	switch (INTEL_GEN(i915)) {
	case 12:
	case 11:
		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
		break;
	case 10:
	case 9:
		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
		break;
	default:
		/*
		 * Apart from HASWELL/BROADWELL other legacy platform doesn't
		 * support lpsp.
		 */
		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
		else
			seq_puts(m, "LPSP: not supported\n");
	}

	return 0;
}
1335 
/*
 * debugfs: dump the MST topology of every MST-capable DP source port.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* skip MST branch encoders; we want the source ports */
		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(intel_encoder);
		if (!dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
			   dig_port->base.base.base.id,
			   dig_port->base.base.name);
		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
1367 
/*
 * debugfs write: arm or disarm DP compliance test handling. Only an
 * explicit value of "1" sets compliance.test_active on each connected
 * SST DP connector; anything else clears it.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* copy and NUL-terminate the user buffer */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	drm_dbg(&to_i915(dev)->drm,
		"Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST connectors are handled via their own code paths */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			drm_dbg(&to_i915(dev)->drm,
				"Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = true;
			else
				intel_dp->compliance.test_active = false;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
1428 
1429 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1430 {
1431 	struct drm_i915_private *dev_priv = m->private;
1432 	struct drm_device *dev = &dev_priv->drm;
1433 	struct drm_connector *connector;
1434 	struct drm_connector_list_iter conn_iter;
1435 	struct intel_dp *intel_dp;
1436 
1437 	drm_connector_list_iter_begin(dev, &conn_iter);
1438 	drm_for_each_connector_iter(connector, &conn_iter) {
1439 		struct intel_encoder *encoder;
1440 
1441 		if (connector->connector_type !=
1442 		    DRM_MODE_CONNECTOR_DisplayPort)
1443 			continue;
1444 
1445 		encoder = to_intel_encoder(connector->encoder);
1446 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1447 			continue;
1448 
1449 		if (encoder && connector->status == connector_status_connected) {
1450 			intel_dp = enc_to_intel_dp(encoder);
1451 			if (intel_dp->compliance.test_active)
1452 				seq_puts(m, "1");
1453 			else
1454 				seq_puts(m, "0");
1455 		} else
1456 			seq_puts(m, "0");
1457 	}
1458 	drm_connector_list_iter_end(&conn_iter);
1459 
1460 	return 0;
1461 }
1462 
1463 static int i915_displayport_test_active_open(struct inode *inode,
1464 					     struct file *file)
1465 {
1466 	return single_open(file, i915_displayport_test_active_show,
1467 			   inode->i_private);
1468 }
1469 
1470 static const struct file_operations i915_displayport_test_active_fops = {
1471 	.owner = THIS_MODULE,
1472 	.open = i915_displayport_test_active_open,
1473 	.read = seq_read,
1474 	.llseek = seq_lseek,
1475 	.release = single_release,
1476 	.write = i915_displayport_test_active_write
1477 };
1478 
/*
 * debugfs read: dump the DP compliance test payload for every connected
 * SST DP connector — EDID checksum, video-pattern geometry/bpc, or PHY
 * pattern parameters, depending on the requested test type.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST connectors are handled via their own code paths */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			} else if (intel_dp->compliance.test_type ==
				   DP_TEST_LINK_PHY_TEST_PATTERN) {
				seq_printf(m, "pattern: %d\n",
					   intel_dp->compliance.test_data.phytest.phy_pattern);
				seq_printf(m, "Number of lanes: %d\n",
					   intel_dp->compliance.test_data.phytest.num_lanes);
				seq_printf(m, "Link Rate: %d\n",
					   intel_dp->compliance.test_data.phytest.link_rate);
				seq_printf(m, "level: %02x\n",
					   intel_dp->train_set[0]);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1532 
/*
 * debugfs read: print the pending DP compliance test type for every
 * connected SST DP connector ("0" when nothing is connected).
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST connectors are handled via their own code paths */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1564 
/*
 * Print one watermark latency table. Raw values are scaled to a
 * common 0.1 usec unit before being printed alongside the raw value.
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	/* the number of valid WM levels is platform dependent */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
1604 
1605 static int pri_wm_latency_show(struct seq_file *m, void *data)
1606 {
1607 	struct drm_i915_private *dev_priv = m->private;
1608 	const u16 *latencies;
1609 
1610 	if (INTEL_GEN(dev_priv) >= 9)
1611 		latencies = dev_priv->wm.skl_latency;
1612 	else
1613 		latencies = dev_priv->wm.pri_latency;
1614 
1615 	wm_latency_show(m, latencies);
1616 
1617 	return 0;
1618 }
1619 
1620 static int spr_wm_latency_show(struct seq_file *m, void *data)
1621 {
1622 	struct drm_i915_private *dev_priv = m->private;
1623 	const u16 *latencies;
1624 
1625 	if (INTEL_GEN(dev_priv) >= 9)
1626 		latencies = dev_priv->wm.skl_latency;
1627 	else
1628 		latencies = dev_priv->wm.spr_latency;
1629 
1630 	wm_latency_show(m, latencies);
1631 
1632 	return 0;
1633 }
1634 
1635 static int cur_wm_latency_show(struct seq_file *m, void *data)
1636 {
1637 	struct drm_i915_private *dev_priv = m->private;
1638 	const u16 *latencies;
1639 
1640 	if (INTEL_GEN(dev_priv) >= 9)
1641 		latencies = dev_priv->wm.skl_latency;
1642 	else
1643 		latencies = dev_priv->wm.cur_latency;
1644 
1645 	wm_latency_show(m, latencies);
1646 
1647 	return 0;
1648 }
1649 
1650 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1651 {
1652 	struct drm_i915_private *dev_priv = inode->i_private;
1653 
1654 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1655 		return -ENODEV;
1656 
1657 	return single_open(file, pri_wm_latency_show, dev_priv);
1658 }
1659 
1660 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1661 {
1662 	struct drm_i915_private *dev_priv = inode->i_private;
1663 
1664 	if (HAS_GMCH(dev_priv))
1665 		return -ENODEV;
1666 
1667 	return single_open(file, spr_wm_latency_show, dev_priv);
1668 }
1669 
1670 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1671 {
1672 	struct drm_i915_private *dev_priv = inode->i_private;
1673 
1674 	if (HAS_GMCH(dev_priv))
1675 		return -ENODEV;
1676 
1677 	return single_open(file, cur_wm_latency_show, dev_priv);
1678 }
1679 
/*
 * Parse exactly num_levels watermark latency values from userspace and
 * store them into @wm under the modeset locks. Returns -EINVAL unless
 * the input contains exactly the expected number of values.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* must match the per-platform level count in wm_latency_show() */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
1724 
1725 
1726 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1727 				    size_t len, loff_t *offp)
1728 {
1729 	struct seq_file *m = file->private_data;
1730 	struct drm_i915_private *dev_priv = m->private;
1731 	u16 *latencies;
1732 
1733 	if (INTEL_GEN(dev_priv) >= 9)
1734 		latencies = dev_priv->wm.skl_latency;
1735 	else
1736 		latencies = dev_priv->wm.pri_latency;
1737 
1738 	return wm_latency_write(file, ubuf, len, offp, latencies);
1739 }
1740 
1741 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1742 				    size_t len, loff_t *offp)
1743 {
1744 	struct seq_file *m = file->private_data;
1745 	struct drm_i915_private *dev_priv = m->private;
1746 	u16 *latencies;
1747 
1748 	if (INTEL_GEN(dev_priv) >= 9)
1749 		latencies = dev_priv->wm.skl_latency;
1750 	else
1751 		latencies = dev_priv->wm.spr_latency;
1752 
1753 	return wm_latency_write(file, ubuf, len, offp, latencies);
1754 }
1755 
1756 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1757 				    size_t len, loff_t *offp)
1758 {
1759 	struct seq_file *m = file->private_data;
1760 	struct drm_i915_private *dev_priv = m->private;
1761 	u16 *latencies;
1762 
1763 	if (INTEL_GEN(dev_priv) >= 9)
1764 		latencies = dev_priv->wm.skl_latency;
1765 	else
1766 		latencies = dev_priv->wm.cur_latency;
1767 
1768 	return wm_latency_write(file, ubuf, len, offp, latencies);
1769 }
1770 
1771 static const struct file_operations i915_pri_wm_latency_fops = {
1772 	.owner = THIS_MODULE,
1773 	.open = pri_wm_latency_open,
1774 	.read = seq_read,
1775 	.llseek = seq_lseek,
1776 	.release = single_release,
1777 	.write = pri_wm_latency_write
1778 };
1779 
1780 static const struct file_operations i915_spr_wm_latency_fops = {
1781 	.owner = THIS_MODULE,
1782 	.open = spr_wm_latency_open,
1783 	.read = seq_read,
1784 	.llseek = seq_lseek,
1785 	.release = single_release,
1786 	.write = spr_wm_latency_write
1787 };
1788 
1789 static const struct file_operations i915_cur_wm_latency_fops = {
1790 	.owner = THIS_MODULE,
1791 	.open = cur_wm_latency_open,
1792 	.read = seq_read,
1793 	.llseek = seq_lseek,
1794 	.release = single_release,
1795 	.write = cur_wm_latency_write
1796 };
1797 
/*
 * debugfs read: report the HPD storm detection threshold and whether a
 * storm is currently being handled (reenable work still pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
1816 
/*
 * debugfs write: set the HPD storm detection threshold. "reset"
 * restores the default, 0 disables detection. Also clears the per-pin
 * storm statistics and re-enables hpd if a storm was in progress.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
1866 
1867 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1868 {
1869 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1870 }
1871 
1872 static const struct file_operations i915_hpd_storm_ctl_fops = {
1873 	.owner = THIS_MODULE,
1874 	.open = i915_hpd_storm_ctl_open,
1875 	.read = seq_read,
1876 	.llseek = seq_lseek,
1877 	.release = single_release,
1878 	.write = i915_hpd_storm_ctl_write
1879 };
1880 
1881 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1882 {
1883 	struct drm_i915_private *dev_priv = m->private;
1884 
1885 	seq_printf(m, "Enabled: %s\n",
1886 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1887 
1888 	return 0;
1889 }
1890 
1891 static int
1892 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1893 {
1894 	return single_open(file, i915_hpd_short_storm_ctl_show,
1895 			   inode->i_private);
1896 }
1897 
/*
 * debugfs write: enable/disable HPD short-pulse storm detection.
 * "reset" restores the platform default (enabled when the platform
 * lacks DP MST). Clears per-pin storm statistics as a side effect.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
1944 
1945 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1946 	.owner = THIS_MODULE,
1947 	.open = i915_hpd_short_storm_ctl_open,
1948 	.read = seq_read,
1949 	.llseek = seq_lseek,
1950 	.release = single_release,
1951 	.write = i915_hpd_short_storm_ctl_write,
1952 };
1953 
/*
 * debugfs: manually enable (val != 0) or disable (val == 0) DRRS on
 * every active crtc that supports it. Takes each crtc's modeset lock,
 * waits for any pending commit's hw_done, then toggles DRRS on the
 * crtc's eDP connector(s).
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->hw.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* wait for any in-flight commit to reach the hardware */
		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* only connectors currently driven by this crtc */
			if (!(crtc_state->uapi.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(to_intel_connector(connector));
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			drm_dbg(&dev_priv->drm,
				"Manually %sabling DRRS. %llu\n",
				val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(encoder);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
2024 
/*
 * debugfs write: re-arm FIFO underrun reporting on every active pipe
 * (and reset FBC underrun tracking) when a true value is written.
 * Waits for any pending commit to fully complete on each crtc first.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* writing a false value is accepted but does nothing */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->hw.active) {
			drm_dbg_kms(&dev_priv->drm,
				    "Re-arming FIFO underruns on pipe %c\n",
				    pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
2079 
/* Write-only file; simple_open() stashes the i915 pointer in private_data. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
2086 
/* Read-only display debugfs nodes, registered via drm_debugfs_create_files(). */
static const struct drm_info_list intel_display_debugfs_list[] = {
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_lpsp_status", i915_lpsp_status, 0},
};
2105 
/* Writable (0644) display debugfs files, each with custom file_operations. */
static const struct {
	const char *name;
	const struct file_operations *fops;
} intel_display_debugfs_files[] = {
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
2124 
2125 void intel_display_debugfs_register(struct drm_i915_private *i915)
2126 {
2127 	struct drm_minor *minor = i915->drm.primary;
2128 	int i;
2129 
2130 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2131 		debugfs_create_file(intel_display_debugfs_files[i].name,
2132 				    S_IRUGO | S_IWUSR,
2133 				    minor->debugfs_root,
2134 				    to_i915(minor->dev),
2135 				    intel_display_debugfs_files[i].fops);
2136 	}
2137 
2138 	drm_debugfs_create_files(intel_display_debugfs_list,
2139 				 ARRAY_SIZE(intel_display_debugfs_list),
2140 				 minor->debugfs_root, minor);
2141 }
2142 
2143 static int i915_panel_show(struct seq_file *m, void *data)
2144 {
2145 	struct drm_connector *connector = m->private;
2146 	struct intel_dp *intel_dp =
2147 		intel_attached_dp(to_intel_connector(connector));
2148 
2149 	if (connector->status != connector_status_connected)
2150 		return -ENODEV;
2151 
2152 	seq_printf(m, "Panel power up delay: %d\n",
2153 		   intel_dp->pps.panel_power_up_delay);
2154 	seq_printf(m, "Panel power down delay: %d\n",
2155 		   intel_dp->pps.panel_power_down_delay);
2156 	seq_printf(m, "Backlight on delay: %d\n",
2157 		   intel_dp->pps.backlight_on_delay);
2158 	seq_printf(m, "Backlight off delay: %d\n",
2159 		   intel_dp->pps.backlight_off_delay);
2160 
2161 	return 0;
2162 }
2163 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2164 
2165 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2166 {
2167 	struct drm_connector *connector = m->private;
2168 	struct intel_connector *intel_connector = to_intel_connector(connector);
2169 
2170 	if (connector->status != connector_status_connected)
2171 		return -ENODEV;
2172 
2173 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2174 		   connector->base.id);
2175 	intel_hdcp_info(m, intel_connector);
2176 
2177 	return 0;
2178 }
2179 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2180 
/*
 * Print the LPSP capability verdict to the seq_file 'm' taken from the
 * caller's scope. COND and the whole expansion are parenthesized so the
 * macro stays safe inside larger expressions and with compound conditions.
 */
#define LPSP_CAPABLE(COND) ((COND) ? seq_puts(m, "LPSP: capable\n") : \
			    seq_puts(m, "LPSP: incapable\n"))
2183 
/*
 * Report whether this connector can keep the display in LPSP (Low Power
 * Single Pipe); the verdict depends on platform generation and port type.
 */
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_encoder *encoder;

	encoder = intel_attached_encoder(to_intel_connector(connector));
	if (!encoder)
		return -ENODEV;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	switch (INTEL_GEN(i915)) {
	case 12:
		/*
		 * TGL could drive LPSP on ports up to DDI_C, but no TGL SKU
		 * wires up DDI_C and the driver does not initialize that
		 * port on gen12, so only ports A and B count as capable.
		 */
		LPSP_CAPABLE(encoder->port <= PORT_B);
		break;
	case 11:
		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
		break;
	case 10:
	case 9:
		/* Gen9/10: LPSP only on port A for DSI/eDP/DP connectors. */
		LPSP_CAPABLE(encoder->port == PORT_A &&
			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
		break;
	default:
		/*
		 * Pre-gen9 platforms other than HSW/BDW print nothing and
		 * still return 0, leaving the file empty on read.
		 */
		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2225 
/*
 * Dump DSC/FEC capability and enable state for the connector's current
 * CRTC. Locking uses a drm_modeset_acquire_ctx with an explicit
 * backoff-and-retry loop: on -EDEADLK the held locks are dropped via
 * drm_modeset_backoff() and the whole sequence restarts.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		/* connection_mutex guards connector->state->crtc. */
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* -EDEADLK with a successful backoff means retry. */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = intel_attached_dp(to_intel_connector(connector));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* FEC is a DP-link feature; not applicable to eDP. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	/* Drops whatever locks are still held, on every exit path. */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
2284 
2285 static ssize_t i915_dsc_fec_support_write(struct file *file,
2286 					  const char __user *ubuf,
2287 					  size_t len, loff_t *offp)
2288 {
2289 	bool dsc_enable = false;
2290 	int ret;
2291 	struct drm_connector *connector =
2292 		((struct seq_file *)file->private_data)->private;
2293 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2294 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2295 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2296 
2297 	if (len == 0)
2298 		return 0;
2299 
2300 	drm_dbg(&i915->drm,
2301 		"Copied %zu bytes from user to force DSC\n", len);
2302 
2303 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2304 	if (ret < 0)
2305 		return ret;
2306 
2307 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2308 		(dsc_enable) ? "true" : "false");
2309 	intel_dp->force_dsc_en = dsc_enable;
2310 
2311 	*offp += len;
2312 	return len;
2313 }
2314 
2315 static int i915_dsc_fec_support_open(struct inode *inode,
2316 				     struct file *file)
2317 {
2318 	return single_open(file, i915_dsc_fec_support_show,
2319 			   inode->i_private);
2320 }
2321 
2322 static const struct file_operations i915_dsc_fec_support_fops = {
2323 	.owner = THIS_MODULE,
2324 	.open = i915_dsc_fec_support_open,
2325 	.read = seq_read,
2326 	.llseek = seq_lseek,
2327 	.release = single_release,
2328 	.write = i915_dsc_fec_support_write
2329 };
2330 
2331 /**
2332  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2333  * @connector: pointer to a registered drm_connector
2334  *
2335  * Cleanup will be done by drm_connector_unregister() through a call to
2336  * drm_debugfs_connector_remove().
2337  *
2338  * Returns 0 on success, negative error codes on error.
2339  */
2340 int intel_connector_debugfs_add(struct drm_connector *connector)
2341 {
2342 	struct dentry *root = connector->debugfs_entry;
2343 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2344 
2345 	/* The connector must have been registered beforehands. */
2346 	if (!root)
2347 		return -ENODEV;
2348 
2349 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2350 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2351 				    connector, &i915_panel_fops);
2352 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2353 				    connector, &i915_psr_sink_status_fops);
2354 	}
2355 
2356 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2357 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2358 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2359 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2360 				    connector, &i915_hdcp_sink_capability_fops);
2361 	}
2362 
2363 	if (INTEL_GEN(dev_priv) >= 10 &&
2364 	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2365 	      !to_intel_connector(connector)->mst_port) ||
2366 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2367 		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
2368 				    connector, &i915_dsc_fec_support_fops);
2369 
2370 	/* Legacy panels doesn't lpsp on any platform */
2371 	if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2372 	     IS_BROADWELL(dev_priv)) &&
2373 	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2374 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2375 	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2376 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2377 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2378 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2379 				    connector, &i915_lpsp_capability_fops);
2380 
2381 	return 0;
2382 }
2383 
2384 /**
2385  * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
2386  * @crtc: pointer to a drm_crtc
2387  *
2388  * Returns 0 on success, negative error codes on error.
2389  *
2390  * Failure to add debugfs entries should generally be ignored.
2391  */
2392 int intel_crtc_debugfs_add(struct drm_crtc *crtc)
2393 {
2394 	if (!crtc->debugfs_entry)
2395 		return -ENODEV;
2396 
2397 	crtc_updates_add(crtc);
2398 	return 0;
2399 }
2400