xref: /linux/drivers/gpu/drm/i915/display/intel_display_debugfs.c (revision 5a7eeb8ba143d860050ecea924a8f074f02d8023)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dp.h"
15 #include "intel_fbc.h"
16 #include "intel_hdcp.h"
17 #include "intel_hdmi.h"
18 #include "intel_pm.h"
19 #include "intel_psr.h"
20 #include "intel_sideband.h"
21 
22 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
23 {
24 	return to_i915(node->minor->dev);
25 }
26 
27 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
28 {
29 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
30 
31 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
32 		   dev_priv->fb_tracking.busy_bits);
33 
34 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
35 		   dev_priv->fb_tracking.flip_bits);
36 
37 	return 0;
38 }
39 
40 static int i915_fbc_status(struct seq_file *m, void *unused)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	struct intel_fbc *fbc = &dev_priv->fbc;
44 	intel_wakeref_t wakeref;
45 
46 	if (!HAS_FBC(dev_priv))
47 		return -ENODEV;
48 
49 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
50 	mutex_lock(&fbc->lock);
51 
52 	if (intel_fbc_is_active(dev_priv))
53 		seq_puts(m, "FBC enabled\n");
54 	else
55 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
56 
57 	if (intel_fbc_is_active(dev_priv)) {
58 		u32 mask;
59 
60 		if (INTEL_GEN(dev_priv) >= 8)
61 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
62 		else if (INTEL_GEN(dev_priv) >= 7)
63 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
64 		else if (INTEL_GEN(dev_priv) >= 5)
65 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
66 		else if (IS_G4X(dev_priv))
67 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
68 		else
69 			mask = intel_de_read(dev_priv, FBC_STATUS) &
70 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
71 
72 		seq_printf(m, "Compressing: %s\n", yesno(mask));
73 	}
74 
75 	mutex_unlock(&fbc->lock);
76 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
77 
78 	return 0;
79 }
80 
81 static int i915_fbc_false_color_get(void *data, u64 *val)
82 {
83 	struct drm_i915_private *dev_priv = data;
84 
85 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
86 		return -ENODEV;
87 
88 	*val = dev_priv->fbc.false_color;
89 
90 	return 0;
91 }
92 
93 static int i915_fbc_false_color_set(void *data, u64 val)
94 {
95 	struct drm_i915_private *dev_priv = data;
96 	u32 reg;
97 
98 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
99 		return -ENODEV;
100 
101 	mutex_lock(&dev_priv->fbc.lock);
102 
103 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
104 	dev_priv->fbc.false_color = val;
105 
106 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
107 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
108 
109 	mutex_unlock(&dev_priv->fbc.lock);
110 	return 0;
111 }
112 
113 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
114 			i915_fbc_false_color_get, i915_fbc_false_color_set,
115 			"%llu\n");
116 
117 static int i915_ips_status(struct seq_file *m, void *unused)
118 {
119 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
120 	intel_wakeref_t wakeref;
121 
122 	if (!HAS_IPS(dev_priv))
123 		return -ENODEV;
124 
125 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
126 
127 	seq_printf(m, "Enabled by kernel parameter: %s\n",
128 		   yesno(i915_modparams.enable_ips));
129 
130 	if (INTEL_GEN(dev_priv) >= 8) {
131 		seq_puts(m, "Currently: unknown\n");
132 	} else {
133 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
134 			seq_puts(m, "Currently: enabled\n");
135 		else
136 			seq_puts(m, "Currently: disabled\n");
137 	}
138 
139 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
140 
141 	return 0;
142 }
143 
144 static int i915_sr_status(struct seq_file *m, void *unused)
145 {
146 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
147 	intel_wakeref_t wakeref;
148 	bool sr_enabled = false;
149 
150 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
151 
152 	if (INTEL_GEN(dev_priv) >= 9)
153 		/* no global SR status; inspect per-plane WM */;
154 	else if (HAS_PCH_SPLIT(dev_priv))
155 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
156 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
157 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
158 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
159 	else if (IS_I915GM(dev_priv))
160 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
161 	else if (IS_PINEVIEW(dev_priv))
162 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
163 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
164 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
165 
166 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
167 
168 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
169 
170 	return 0;
171 }
172 
173 static int i915_opregion(struct seq_file *m, void *unused)
174 {
175 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
176 
177 	if (opregion->header)
178 		seq_write(m, opregion->header, OPREGION_SIZE);
179 
180 	return 0;
181 }
182 
183 static int i915_vbt(struct seq_file *m, void *unused)
184 {
185 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
186 
187 	if (opregion->vbt)
188 		seq_write(m, opregion->vbt, opregion->vbt_size);
189 
190 	return 0;
191 }
192 
193 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
194 {
195 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
196 	struct drm_device *dev = &dev_priv->drm;
197 	struct intel_framebuffer *fbdev_fb = NULL;
198 	struct drm_framebuffer *drm_fb;
199 
200 #ifdef CONFIG_DRM_FBDEV_EMULATION
201 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
202 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
203 
204 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
205 			   fbdev_fb->base.width,
206 			   fbdev_fb->base.height,
207 			   fbdev_fb->base.format->depth,
208 			   fbdev_fb->base.format->cpp[0] * 8,
209 			   fbdev_fb->base.modifier,
210 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
211 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
212 		seq_putc(m, '\n');
213 	}
214 #endif
215 
216 	mutex_lock(&dev->mode_config.fb_lock);
217 	drm_for_each_fb(drm_fb, dev) {
218 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
219 		if (fb == fbdev_fb)
220 			continue;
221 
222 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
223 			   fb->base.width,
224 			   fb->base.height,
225 			   fb->base.format->depth,
226 			   fb->base.format->cpp[0] * 8,
227 			   fb->base.modifier,
228 			   drm_framebuffer_read_refcount(&fb->base));
229 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
230 		seq_putc(m, '\n');
231 	}
232 	mutex_unlock(&dev->mode_config.fb_lock);
233 
234 	return 0;
235 }
236 
237 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
238 {
239 	u8 val;
240 	static const char * const sink_status[] = {
241 		"inactive",
242 		"transition to active, capture and display",
243 		"active, display from RFB",
244 		"active, capture and display on sink device timings",
245 		"transition to inactive, capture and display, timing re-sync",
246 		"reserved",
247 		"reserved",
248 		"sink internal error",
249 	};
250 	struct drm_connector *connector = m->private;
251 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
252 	struct intel_dp *intel_dp =
253 		intel_attached_dp(to_intel_connector(connector));
254 	int ret;
255 
256 	if (!CAN_PSR(dev_priv)) {
257 		seq_puts(m, "PSR Unsupported\n");
258 		return -ENODEV;
259 	}
260 
261 	if (connector->status != connector_status_connected)
262 		return -ENODEV;
263 
264 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
265 
266 	if (ret == 1) {
267 		const char *str = "unknown";
268 
269 		val &= DP_PSR_SINK_STATE_MASK;
270 		if (val < ARRAY_SIZE(sink_status))
271 			str = sink_status[val];
272 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
273 	} else {
274 		return ret;
275 	}
276 
277 	return 0;
278 }
279 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
280 
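/* Decode and print the source (hardware) PSR1/PSR2 state machine status. */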
281 static void
282 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
283 {
284 	u32 val, status_val;
285 	const char *status = "unknown";
286 
287 	if (dev_priv->psr.psr2_enabled) {
288 		static const char * const live_status[] = {
289 			"IDLE",
290 			"CAPTURE",
291 			"CAPTURE_FS",
292 			"SLEEP",
293 			"BUFON_FW",
294 			"ML_UP",
295 			"SU_STANDBY",
296 			"FAST_SLEEP",
297 			"DEEP_SLEEP",
298 			"BUF_ON",
299 			"TG_ON"
300 		};
301 		val = intel_de_read(dev_priv,
302 				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
303 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
304 			      EDP_PSR2_STATUS_STATE_SHIFT;
305 		if (status_val < ARRAY_SIZE(live_status))
306 			status = live_status[status_val];
307 	} else {
308 		static const char * const live_status[] = {
309 			"IDLE",
310 			"SRDONACK",
311 			"SRDENT",
312 			"BUFOFF",
313 			"BUFON",
314 			"AUXACK",
315 			"SRDOFFACK",
316 			"SRDENT_ON",
317 		};
318 		val = intel_de_read(dev_priv,
319 				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
320 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
321 			      EDP_PSR_STATUS_STATE_SHIFT;
322 		if (status_val < ARRAY_SIZE(live_status))
323 			status = live_status[status_val];
324 	}
325 
326 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
327 }
328 
329 static int i915_edp_psr_status(struct seq_file *m, void *data)
330 {
331 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
332 	struct i915_psr *psr = &dev_priv->psr;
333 	intel_wakeref_t wakeref;
334 	const char *status;
335 	bool enabled;
336 	u32 val;
337 
338 	if (!HAS_PSR(dev_priv))
339 		return -ENODEV;
340 
341 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
342 	if (psr->dp)
343 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
344 	seq_puts(m, "\n");
345 
346 	if (!psr->sink_support)
347 		return 0;
348 
349 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
350 	mutex_lock(&psr->lock);
351 
352 	if (psr->enabled)
353 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
354 	else
355 		status = "disabled";
356 	seq_printf(m, "PSR mode: %s\n", status);
357 
358 	if (!psr->enabled) {
359 		seq_printf(m, "PSR sink not reliable: %s\n",
360 			   yesno(psr->sink_not_reliable));
361 
362 		goto unlock;
363 	}
364 
365 	if (psr->psr2_enabled) {
366 		val = intel_de_read(dev_priv,
367 				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
368 		enabled = val & EDP_PSR2_ENABLE;
369 	} else {
370 		val = intel_de_read(dev_priv,
371 				    EDP_PSR_CTL(dev_priv->psr.transcoder));
372 		enabled = val & EDP_PSR_ENABLE;
373 	}
374 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
375 		   enableddisabled(enabled), val);
376 	psr_source_status(dev_priv, m);
377 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
378 		   psr->busy_frontbuffer_bits);
379 
380 	/*
381 	 * SKL+ Perf counter is reset to 0 every time DC state is entered
382 	 */
383 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
384 		val = intel_de_read(dev_priv,
385 				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
386 		val &= EDP_PSR_PERF_CNT_MASK;
387 		seq_printf(m, "Performance counter: %u\n", val);
388 	}
389 
390 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
391 		seq_printf(m, "Last attempted entry at: %lld\n",
392 			   psr->last_entry_attempt);
393 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
394 	}
395 
396 	if (psr->psr2_enabled) {
397 		u32 su_frames_val[3];
398 		int frame;
399 
400 		/*
401 		 * Reading all 3 registers before hand to minimize crossing a
402 		 * Reading all 3 registers beforehand to minimize crossing a
403 		 */
404 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
405 			val = intel_de_read(dev_priv,
406 					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
407 			su_frames_val[frame / 3] = val;
408 		}
409 
410 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
411 
412 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
413 			u32 su_blocks;
414 
415 			su_blocks = su_frames_val[frame / 3] &
416 				    PSR2_SU_STATUS_MASK(frame);
417 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
418 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
419 		}
420 	}
421 
422 unlock:
423 	mutex_unlock(&psr->lock);
424 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
425 
426 	return 0;
427 }
428 
429 static int
430 i915_edp_psr_debug_set(void *data, u64 val)
431 {
432 	struct drm_i915_private *dev_priv = data;
433 	intel_wakeref_t wakeref;
434 	int ret;
435 
436 	if (!CAN_PSR(dev_priv))
437 		return -ENODEV;
438 
439 	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
440 
441 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
442 
443 	ret = intel_psr_debug_set(dev_priv, val);
444 
445 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
446 
447 	return ret;
448 }
449 
450 static int
451 i915_edp_psr_debug_get(void *data, u64 *val)
452 {
453 	struct drm_i915_private *dev_priv = data;
454 
455 	if (!CAN_PSR(dev_priv))
456 		return -ENODEV;
457 
458 	*val = READ_ONCE(dev_priv->psr.debug);
459 	return 0;
460 }
461 
462 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
463 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
464 			"%llu\n");
465 
466 static int i915_power_domain_info(struct seq_file *m, void *unused)
467 {
468 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
469 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
470 	int i;
471 
472 	mutex_lock(&power_domains->lock);
473 
474 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
475 	for (i = 0; i < power_domains->power_well_count; i++) {
476 		struct i915_power_well *power_well;
477 		enum intel_display_power_domain power_domain;
478 
479 		power_well = &power_domains->power_wells[i];
480 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
481 			   power_well->count);
482 
483 		for_each_power_domain(power_domain, power_well->desc->domains)
484 			seq_printf(m, "  %-23s %d\n",
485 				 intel_display_power_domain_str(power_domain),
486 				 power_domains->domain_use_count[power_domain]);
487 	}
488 
489 	mutex_unlock(&power_domains->lock);
490 
491 	return 0;
492 }
493 
494 static int i915_dmc_info(struct seq_file *m, void *unused)
495 {
496 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
497 	intel_wakeref_t wakeref;
498 	struct intel_csr *csr;
499 	i915_reg_t dc5_reg, dc6_reg = {};
500 
501 	if (!HAS_CSR(dev_priv))
502 		return -ENODEV;
503 
504 	csr = &dev_priv->csr;
505 
506 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
507 
508 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
509 	seq_printf(m, "path: %s\n", csr->fw_path);
510 
511 	if (!csr->dmc_payload)
512 		goto out;
513 
514 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
515 		   CSR_VERSION_MINOR(csr->version));
516 
517 	if (INTEL_GEN(dev_priv) >= 12) {
518 		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
519 		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
520 		/*
521 		 * NOTE: DMC_DEBUG3 is a general purpose reg.
522 		 * According to Bspec 49196, DMC f/w reuses the DC5/6 counter
523 		 * reg for DC3CO debugging and validation, but the TGL DMC f/w
524 		 * uses the DMC_DEBUG3 reg as the DC3CO counter.
525 		 */
526 		seq_printf(m, "DC3CO count: %d\n",
527 			   intel_de_read(dev_priv, DMC_DEBUG3));
528 	} else {
529 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
530 						 SKL_CSR_DC3_DC5_COUNT;
531 		if (!IS_GEN9_LP(dev_priv))
532 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
533 	}
534 
535 	seq_printf(m, "DC3 -> DC5 count: %d\n",
536 		   intel_de_read(dev_priv, dc5_reg));
537 	if (dc6_reg.reg)
538 		seq_printf(m, "DC5 -> DC6 count: %d\n",
539 			   intel_de_read(dev_priv, dc6_reg));
540 
541 out:
542 	seq_printf(m, "program base: 0x%08x\n",
543 		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
544 	seq_printf(m, "ssp base: 0x%08x\n",
545 		   intel_de_read(dev_priv, CSR_SSP_BASE));
546 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
547 
548 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
549 
550 	return 0;
551 }
552 
553 static void intel_seq_print_mode(struct seq_file *m, int tabs,
554 				 const struct drm_display_mode *mode)
555 {
556 	int i;
557 
558 	for (i = 0; i < tabs; i++)
559 		seq_putc(m, '\t');
560 
561 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
562 }
563 
564 static void intel_encoder_info(struct seq_file *m,
565 			       struct intel_crtc *crtc,
566 			       struct intel_encoder *encoder)
567 {
568 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
569 	struct drm_connector_list_iter conn_iter;
570 	struct drm_connector *connector;
571 
572 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
573 		   encoder->base.base.id, encoder->base.name);
574 
575 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
576 	drm_for_each_connector_iter(connector, &conn_iter) {
577 		const struct drm_connector_state *conn_state =
578 			connector->state;
579 
580 		if (conn_state->best_encoder != &encoder->base)
581 			continue;
582 
583 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
584 			   connector->base.id, connector->name);
585 	}
586 	drm_connector_list_iter_end(&conn_iter);
587 }
588 
589 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
590 {
591 	const struct drm_display_mode *mode = panel->fixed_mode;
592 
593 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
594 }
595 
596 static void intel_hdcp_info(struct seq_file *m,
597 			    struct intel_connector *intel_connector)
598 {
599 	bool hdcp_cap, hdcp2_cap;
600 
601 	hdcp_cap = intel_hdcp_capable(intel_connector);
602 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
603 
604 	if (hdcp_cap)
605 		seq_puts(m, "HDCP1.4 ");
606 	if (hdcp2_cap)
607 		seq_puts(m, "HDCP2.2 ");
608 
609 	if (!hdcp_cap && !hdcp2_cap)
610 		seq_puts(m, "None");
611 
612 	seq_puts(m, "\n");
613 }
614 
615 static void intel_dp_info(struct seq_file *m,
616 			  struct intel_connector *intel_connector)
617 {
618 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
619 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
620 
621 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
622 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
623 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
624 		intel_panel_info(m, &intel_connector->panel);
625 
626 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
627 				&intel_dp->aux);
628 	if (intel_connector->hdcp.shim) {
629 		seq_puts(m, "\tHDCP version: ");
630 		intel_hdcp_info(m, intel_connector);
631 	}
632 }
633 
634 static void intel_dp_mst_info(struct seq_file *m,
635 			  struct intel_connector *intel_connector)
636 {
637 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
638 	struct intel_dp_mst_encoder *intel_mst =
639 		enc_to_mst(intel_encoder);
640 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
641 	struct intel_dp *intel_dp = &intel_dig_port->dp;
642 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
643 					intel_connector->port);
644 
645 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
646 }
647 
648 static void intel_hdmi_info(struct seq_file *m,
649 			    struct intel_connector *intel_connector)
650 {
651 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
652 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
653 
654 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
655 	if (intel_connector->hdcp.shim) {
656 		seq_puts(m, "\tHDCP version: ");
657 		intel_hdcp_info(m, intel_connector);
658 	}
659 }
660 
661 static void intel_lvds_info(struct seq_file *m,
662 			    struct intel_connector *intel_connector)
663 {
664 	intel_panel_info(m, &intel_connector->panel);
665 }
666 
667 static void intel_connector_info(struct seq_file *m,
668 				 struct drm_connector *connector)
669 {
670 	struct intel_connector *intel_connector = to_intel_connector(connector);
671 	const struct drm_connector_state *conn_state = connector->state;
672 	struct intel_encoder *encoder =
673 		to_intel_encoder(conn_state->best_encoder);
674 	const struct drm_display_mode *mode;
675 
676 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
677 		   connector->base.id, connector->name,
678 		   drm_get_connector_status_name(connector->status));
679 
680 	if (connector->status == connector_status_disconnected)
681 		return;
682 
683 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
684 		   connector->display_info.width_mm,
685 		   connector->display_info.height_mm);
686 	seq_printf(m, "\tsubpixel order: %s\n",
687 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
688 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
689 
690 	if (!encoder)
691 		return;
692 
693 	switch (connector->connector_type) {
694 	case DRM_MODE_CONNECTOR_DisplayPort:
695 	case DRM_MODE_CONNECTOR_eDP:
696 		if (encoder->type == INTEL_OUTPUT_DP_MST)
697 			intel_dp_mst_info(m, intel_connector);
698 		else
699 			intel_dp_info(m, intel_connector);
700 		break;
701 	case DRM_MODE_CONNECTOR_LVDS:
702 		if (encoder->type == INTEL_OUTPUT_LVDS)
703 			intel_lvds_info(m, intel_connector);
704 		break;
705 	case DRM_MODE_CONNECTOR_HDMIA:
706 		if (encoder->type == INTEL_OUTPUT_HDMI ||
707 		    encoder->type == INTEL_OUTPUT_DDI)
708 			intel_hdmi_info(m, intel_connector);
709 		break;
710 	default:
711 		break;
712 	}
713 
714 	seq_printf(m, "\tmodes:\n");
715 	list_for_each_entry(mode, &connector->modes, head)
716 		intel_seq_print_mode(m, 2, mode);
717 }
718 
719 static const char *plane_type(enum drm_plane_type type)
720 {
721 	switch (type) {
722 	case DRM_PLANE_TYPE_OVERLAY:
723 		return "OVL";
724 	case DRM_PLANE_TYPE_PRIMARY:
725 		return "PRI";
726 	case DRM_PLANE_TYPE_CURSOR:
727 		return "CUR";
728 	/*
729 	 * Deliberately omitting default: to generate compiler warnings
730 	 * when a new drm_plane_type gets added.
731 	 */
732 	}
733 
734 	return "unknown";
735 }
736 
737 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
738 {
739 	/*
740 	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
741 	 * but print them all so that misused values are easy to spot.
742 	 */
743 	snprintf(buf, bufsize,
744 		 "%s%s%s%s%s%s(0x%08x)",
745 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
746 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
747 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
748 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
749 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
750 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
751 		 rotation);
752 }
753 
754 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
755 {
756 	const struct intel_plane_state *plane_state =
757 		to_intel_plane_state(plane->base.state);
758 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
759 	struct drm_format_name_buf format_name;
760 	struct drm_rect src, dst;
761 	char rot_str[48];
762 
763 	src = drm_plane_state_src(&plane_state->uapi);
764 	dst = drm_plane_state_dest(&plane_state->uapi);
765 
766 	if (fb)
767 		drm_get_format_name(fb->format->format, &format_name);
768 
769 	plane_rotation(rot_str, sizeof(rot_str),
770 		       plane_state->uapi.rotation);
771 
772 	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
773 		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
774 		   fb ? fb->width : 0, fb ? fb->height : 0,
775 		   DRM_RECT_FP_ARG(&src),
776 		   DRM_RECT_ARG(&dst),
777 		   rot_str);
778 }
779 
780 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
781 {
782 	const struct intel_plane_state *plane_state =
783 		to_intel_plane_state(plane->base.state);
784 	const struct drm_framebuffer *fb = plane_state->hw.fb;
785 	struct drm_format_name_buf format_name;
786 	char rot_str[48];
787 
788 	if (!fb)
789 		return;
790 
791 	drm_get_format_name(fb->format->format, &format_name);
792 
793 	plane_rotation(rot_str, sizeof(rot_str),
794 		       plane_state->hw.rotation);
795 
796 	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
797 		   fb->base.id, format_name.str,
798 		   fb->width, fb->height,
799 		   yesno(plane_state->uapi.visible),
800 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
801 		   DRM_RECT_ARG(&plane_state->uapi.dst),
802 		   rot_str);
803 }
804 
805 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
806 {
807 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
808 	struct intel_plane *plane;
809 
810 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
811 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
812 			   plane->base.base.id, plane->base.name,
813 			   plane_type(plane->base.type));
814 		intel_plane_uapi_info(m, plane);
815 		intel_plane_hw_info(m, plane);
816 	}
817 }
818 
819 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
820 {
821 	const struct intel_crtc_state *crtc_state =
822 		to_intel_crtc_state(crtc->base.state);
823 	int num_scalers = crtc->num_scalers;
824 	int i;
825 
826 	/* Not all platforms have a scaler */
827 	if (num_scalers) {
828 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
829 			   num_scalers,
830 			   crtc_state->scaler_state.scaler_users,
831 			   crtc_state->scaler_state.scaler_id);
832 
833 		for (i = 0; i < num_scalers; i++) {
834 			const struct intel_scaler *sc =
835 				&crtc_state->scaler_state.scalers[i];
836 
837 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
838 				   i, yesno(sc->in_use), sc->mode);
839 		}
840 		seq_puts(m, "\n");
841 	} else {
842 		seq_puts(m, "\tNo scalers available on this platform\n");
843 	}
844 }
845 
846 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
847 {
848 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
849 	const struct intel_crtc_state *crtc_state =
850 		to_intel_crtc_state(crtc->base.state);
851 	struct intel_encoder *encoder;
852 
853 	seq_printf(m, "[CRTC:%d:%s]:\n",
854 		   crtc->base.base.id, crtc->base.name);
855 
856 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
857 		   yesno(crtc_state->uapi.enable),
858 		   yesno(crtc_state->uapi.active),
859 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
860 
861 	if (crtc_state->hw.enable) {
862 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
863 			   yesno(crtc_state->hw.active),
864 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
865 
866 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
867 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
868 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
869 
870 		intel_scaler_info(m, crtc);
871 	}
872 
873 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
874 				    crtc_state->uapi.encoder_mask)
875 		intel_encoder_info(m, crtc, encoder);
876 
877 	intel_plane_info(m, crtc);
878 
879 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
880 		   yesno(!crtc->cpu_fifo_underrun_disabled),
881 		   yesno(!crtc->pch_fifo_underrun_disabled));
882 }
883 
884 static int i915_display_info(struct seq_file *m, void *unused)
885 {
886 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
887 	struct drm_device *dev = &dev_priv->drm;
888 	struct intel_crtc *crtc;
889 	struct drm_connector *connector;
890 	struct drm_connector_list_iter conn_iter;
891 	intel_wakeref_t wakeref;
892 
893 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
894 
895 	drm_modeset_lock_all(dev);
896 
897 	seq_printf(m, "CRTC info\n");
898 	seq_printf(m, "---------\n");
899 	for_each_intel_crtc(dev, crtc)
900 		intel_crtc_info(m, crtc);
901 
902 	seq_printf(m, "\n");
903 	seq_printf(m, "Connector info\n");
904 	seq_printf(m, "--------------\n");
905 	drm_connector_list_iter_begin(dev, &conn_iter);
906 	drm_for_each_connector_iter(connector, &conn_iter)
907 		intel_connector_info(m, connector);
908 	drm_connector_list_iter_end(&conn_iter);
909 
910 	drm_modeset_unlock_all(dev);
911 
912 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
913 
914 	return 0;
915 }
916 
917 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
918 {
919 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
920 	struct drm_device *dev = &dev_priv->drm;
921 	int i;
922 
923 	drm_modeset_lock_all(dev);
924 
925 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
926 		   dev_priv->dpll.ref_clks.nssc,
927 		   dev_priv->dpll.ref_clks.ssc);
928 
929 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
930 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
931 
932 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
933 			   pll->info->id);
934 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
935 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
936 		seq_printf(m, " tracked hardware state:\n");
937 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
938 		seq_printf(m, " dpll_md: 0x%08x\n",
939 			   pll->state.hw_state.dpll_md);
940 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
941 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
942 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
943 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
944 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
945 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
946 			   pll->state.hw_state.mg_refclkin_ctl);
947 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
948 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
949 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
950 			   pll->state.hw_state.mg_clktop2_hsclkctl);
951 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
952 			   pll->state.hw_state.mg_pll_div0);
953 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
954 			   pll->state.hw_state.mg_pll_div1);
955 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
956 			   pll->state.hw_state.mg_pll_lf);
957 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
958 			   pll->state.hw_state.mg_pll_frac_lock);
959 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
960 			   pll->state.hw_state.mg_pll_ssc);
961 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
962 			   pll->state.hw_state.mg_pll_bias);
963 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
964 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
965 	}
966 	drm_modeset_unlock_all(dev);
967 
968 	return 0;
969 }
970 
971 static int i915_ipc_status_show(struct seq_file *m, void *data)
972 {
973 	struct drm_i915_private *dev_priv = m->private;
974 
975 	seq_printf(m, "Isochronous Priority Control: %s\n",
976 			yesno(dev_priv->ipc_enabled));
977 	return 0;
978 }
979 
980 static int i915_ipc_status_open(struct inode *inode, struct file *file)
981 {
982 	struct drm_i915_private *dev_priv = inode->i_private;
983 
984 	if (!HAS_IPC(dev_priv))
985 		return -ENODEV;
986 
987 	return single_open(file, i915_ipc_status_show, dev_priv);
988 }
989 
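/*
 * Write handler: accepts a boolean to force IPC on or off. Watermarks are
 * marked as untrusted and only become correct again after the next commit.
 */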
990 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
991 				     size_t len, loff_t *offp)
992 {
993 	struct seq_file *m = file->private_data;
994 	struct drm_i915_private *dev_priv = m->private;
995 	intel_wakeref_t wakeref;
996 	bool enable;
997 	int ret;
998 
999 	ret = kstrtobool_from_user(ubuf, len, &enable);
1000 	if (ret < 0)
1001 		return ret;
1002 
1003 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1004 		if (!dev_priv->ipc_enabled && enable)
1005 			drm_info(&dev_priv->drm,
1006 				 "Enabling IPC: WM will be proper only after next commit\n");
1007 		dev_priv->wm.distrust_bios_wm = true;
1008 		dev_priv->ipc_enabled = enable;
1009 		intel_enable_ipc(dev_priv);
1010 	}
1011 
1012 	return len;
1013 }
1014 
1015 static const struct file_operations i915_ipc_status_fops = {
1016 	.owner = THIS_MODULE,
1017 	.open = i915_ipc_status_open,
1018 	.read = seq_read,
1019 	.llseek = seq_lseek,
1020 	.release = single_release,
1021 	.write = i915_ipc_status_write
1022 };
1023 
1024 static int i915_ddb_info(struct seq_file *m, void *unused)
1025 {
1026 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1027 	struct drm_device *dev = &dev_priv->drm;
1028 	struct skl_ddb_entry *entry;
1029 	struct intel_crtc *crtc;
1030 
1031 	if (INTEL_GEN(dev_priv) < 9)
1032 		return -ENODEV;
1033 
1034 	drm_modeset_lock_all(dev);
1035 
1036 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1037 
1038 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1039 		struct intel_crtc_state *crtc_state =
1040 			to_intel_crtc_state(crtc->base.state);
1041 		enum pipe pipe = crtc->pipe;
1042 		enum plane_id plane_id;
1043 
1044 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1045 
1046 		for_each_plane_id_on_crtc(crtc, plane_id) {
1047 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1048 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1049 				   entry->start, entry->end,
1050 				   skl_ddb_entry_size(entry));
1051 		}
1052 
1053 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1054 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1055 			   entry->end, skl_ddb_entry_size(entry));
1056 	}
1057 
1058 	drm_modeset_unlock_all(dev);
1059 
1060 	return 0;
1061 }
1062 
1063 static void drrs_status_per_crtc(struct seq_file *m,
1064 				 struct drm_device *dev,
1065 				 struct intel_crtc *intel_crtc)
1066 {
1067 	struct drm_i915_private *dev_priv = to_i915(dev);
1068 	struct i915_drrs *drrs = &dev_priv->drrs;
1069 	int vrefresh = 0;
1070 	struct drm_connector *connector;
1071 	struct drm_connector_list_iter conn_iter;
1072 
1073 	drm_connector_list_iter_begin(dev, &conn_iter);
1074 	drm_for_each_connector_iter(connector, &conn_iter) {
1075 		if (connector->state->crtc != &intel_crtc->base)
1076 			continue;
1077 
1078 		seq_printf(m, "%s:\n", connector->name);
1079 	}
1080 	drm_connector_list_iter_end(&conn_iter);
1081 
1082 	seq_puts(m, "\n");
1083 
1084 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1085 		struct intel_panel *panel;
1086 
1087 		mutex_lock(&drrs->mutex);
1088 		/* DRRS Supported */
1089 		seq_puts(m, "\tDRRS Supported: Yes\n");
1090 
1091 		/* disable_drrs() will make drrs->dp NULL */
1092 		if (!drrs->dp) {
1093 			seq_puts(m, "Idleness DRRS: Disabled\n");
1094 			if (dev_priv->psr.enabled)
1095 				seq_puts(m,
1096 				"\tAs PSR is enabled, DRRS is not enabled\n");
1097 			mutex_unlock(&drrs->mutex);
1098 			return;
1099 		}
1100 
1101 		panel = &drrs->dp->attached_connector->panel;
1102 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1103 					drrs->busy_frontbuffer_bits);
1104 
1105 		seq_puts(m, "\n\t\t");
1106 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1107 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1108 			vrefresh = panel->fixed_mode->vrefresh;
1109 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1110 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1111 			vrefresh = panel->downclock_mode->vrefresh;
1112 		} else {
1113 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1114 						drrs->refresh_rate_type);
1115 			mutex_unlock(&drrs->mutex);
1116 			return;
1117 		}
1118 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1119 
1120 		seq_puts(m, "\n\t\t");
1121 		mutex_unlock(&drrs->mutex);
1122 	} else {
1123 		/* DRRS not supported. Print the VBT parameter*/
1124 		/* DRRS not supported. Print the VBT parameter. */
1125 		seq_puts(m, "\tDRRS Supported: No");
1126 	seq_puts(m, "\n");
1127 }
1128 
1129 static int i915_drrs_status(struct seq_file *m, void *unused)
1130 {
1131 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1132 	struct drm_device *dev = &dev_priv->drm;
1133 	struct intel_crtc *intel_crtc;
1134 	int active_crtc_cnt = 0;
1135 
1136 	drm_modeset_lock_all(dev);
1137 	for_each_intel_crtc(dev, intel_crtc) {
1138 		if (intel_crtc->base.state->active) {
1139 			active_crtc_cnt++;
1140 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1141 
1142 			drrs_status_per_crtc(m, dev, intel_crtc);
1143 		}
1144 	}
1145 	drm_modeset_unlock_all(dev);
1146 
1147 	if (!active_crtc_cnt)
1148 		seq_puts(m, "No active crtc found\n");
1149 
1150 	return 0;
1151 }
1152 
1153 #define LPSP_STATUS(COND) ((COND) ? seq_puts(m, "LPSP: enabled\n") : \
1154 				seq_puts(m, "LPSP: disabled\n"))
1155 
1156 static bool
1157 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1158 			      enum i915_power_well_id power_well_id)
1159 {
1160 	intel_wakeref_t wakeref;
1161 	bool is_enabled;
1162 
1163 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1164 	is_enabled = intel_display_power_well_is_enabled(i915,
1165 							 power_well_id);
1166 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1167 
1168 	return is_enabled;
1169 }
1170 
1171 static int i915_lpsp_status(struct seq_file *m, void *unused)
1172 {
1173 	struct drm_i915_private *i915 = node_to_i915(m->private);
1174 
1175 	switch (INTEL_GEN(i915)) {
1176 	case 12:
1177 	case 11:
1178 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1179 		break;
1180 	case 10:
1181 	case 9:
1182 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1183 		break;
1184 	default:
1185 		/*
1186 		 * Apart from HASWELL/BROADWELL other legacy platform doesn't
1187 		 * Apart from HASWELL/BROADWELL, no other legacy platform
1188 		 * supports LPSP.
1189 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1190 			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1191 		else
1192 			seq_puts(m, "LPSP: not supported\n");
1193 	}
1194 
1195 	return 0;
1196 }
1197 
1198 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1199 {
1200 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1201 	struct drm_device *dev = &dev_priv->drm;
1202 	struct intel_encoder *intel_encoder;
1203 	struct intel_digital_port *intel_dig_port;
1204 	struct drm_connector *connector;
1205 	struct drm_connector_list_iter conn_iter;
1206 
1207 	drm_connector_list_iter_begin(dev, &conn_iter);
1208 	drm_for_each_connector_iter(connector, &conn_iter) {
1209 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1210 			continue;
1211 
1212 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1213 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1214 			continue;
1215 
1216 		intel_dig_port = enc_to_dig_port(intel_encoder);
1217 		if (!intel_dig_port->dp.can_mst)
1218 			continue;
1219 
1220 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1221 			   intel_dig_port->base.base.base.id,
1222 			   intel_dig_port->base.base.name);
1223 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
1224 	}
1225 	drm_connector_list_iter_end(&conn_iter);
1226 
1227 	return 0;
1228 }
1229 
1230 static ssize_t i915_displayport_test_active_write(struct file *file,
1231 						  const char __user *ubuf,
1232 						  size_t len, loff_t *offp)
1233 {
1234 	char *input_buffer;
1235 	int status = 0;
1236 	struct drm_device *dev;
1237 	struct drm_connector *connector;
1238 	struct drm_connector_list_iter conn_iter;
1239 	struct intel_dp *intel_dp;
1240 	int val = 0;
1241 
1242 	dev = ((struct seq_file *)file->private_data)->private;
1243 
1244 	if (len == 0)
1245 		return 0;
1246 
1247 	input_buffer = memdup_user_nul(ubuf, len);
1248 	if (IS_ERR(input_buffer))
1249 		return PTR_ERR(input_buffer);
1250 
1251 	drm_dbg(&to_i915(dev)->drm,
1252 		"Copied %d bytes from user\n", (unsigned int)len);
1253 
1254 	drm_connector_list_iter_begin(dev, &conn_iter);
1255 	drm_for_each_connector_iter(connector, &conn_iter) {
1256 		struct intel_encoder *encoder;
1257 
1258 		if (connector->connector_type !=
1259 		    DRM_MODE_CONNECTOR_DisplayPort)
1260 			continue;
1261 
1262 		encoder = to_intel_encoder(connector->encoder);
1263 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1264 			continue;
1265 
1266 		if (encoder && connector->status == connector_status_connected) {
1267 			intel_dp = enc_to_intel_dp(encoder);
1268 			status = kstrtoint(input_buffer, 10, &val);
1269 			if (status < 0)
1270 				break;
1271 			drm_dbg(&to_i915(dev)->drm,
1272 				"Got %d for test active\n", val);
1273 			/* To prevent erroneous activation of the compliance
1274 			 * testing code, only accept an actual value of 1 here
1275 			 */
1276 			if (val == 1)
1277 				intel_dp->compliance.test_active = true;
1278 			else
1279 				intel_dp->compliance.test_active = false;
1280 		}
1281 	}
1282 	drm_connector_list_iter_end(&conn_iter);
1283 	kfree(input_buffer);
1284 	if (status < 0)
1285 		return status;
1286 
1287 	*offp += len;
1288 	return len;
1289 }
1290 
1291 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1292 {
1293 	struct drm_i915_private *dev_priv = m->private;
1294 	struct drm_device *dev = &dev_priv->drm;
1295 	struct drm_connector *connector;
1296 	struct drm_connector_list_iter conn_iter;
1297 	struct intel_dp *intel_dp;
1298 
1299 	drm_connector_list_iter_begin(dev, &conn_iter);
1300 	drm_for_each_connector_iter(connector, &conn_iter) {
1301 		struct intel_encoder *encoder;
1302 
1303 		if (connector->connector_type !=
1304 		    DRM_MODE_CONNECTOR_DisplayPort)
1305 			continue;
1306 
1307 		encoder = to_intel_encoder(connector->encoder);
1308 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1309 			continue;
1310 
1311 		if (encoder && connector->status == connector_status_connected) {
1312 			intel_dp = enc_to_intel_dp(encoder);
1313 			if (intel_dp->compliance.test_active)
1314 				seq_puts(m, "1");
1315 			else
1316 				seq_puts(m, "0");
1317 		} else
1318 			seq_puts(m, "0");
1319 	}
1320 	drm_connector_list_iter_end(&conn_iter);
1321 
1322 	return 0;
1323 }
1324 
1325 static int i915_displayport_test_active_open(struct inode *inode,
1326 					     struct file *file)
1327 {
1328 	return single_open(file, i915_displayport_test_active_show,
1329 			   inode->i_private);
1330 }
1331 
1332 static const struct file_operations i915_displayport_test_active_fops = {
1333 	.owner = THIS_MODULE,
1334 	.open = i915_displayport_test_active_open,
1335 	.read = seq_read,
1336 	.llseek = seq_lseek,
1337 	.release = single_release,
1338 	.write = i915_displayport_test_active_write
1339 };
1340 
1341 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1342 {
1343 	struct drm_i915_private *dev_priv = m->private;
1344 	struct drm_device *dev = &dev_priv->drm;
1345 	struct drm_connector *connector;
1346 	struct drm_connector_list_iter conn_iter;
1347 	struct intel_dp *intel_dp;
1348 
1349 	drm_connector_list_iter_begin(dev, &conn_iter);
1350 	drm_for_each_connector_iter(connector, &conn_iter) {
1351 		struct intel_encoder *encoder;
1352 
1353 		if (connector->connector_type !=
1354 		    DRM_MODE_CONNECTOR_DisplayPort)
1355 			continue;
1356 
1357 		encoder = to_intel_encoder(connector->encoder);
1358 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1359 			continue;
1360 
1361 		if (encoder && connector->status == connector_status_connected) {
1362 			intel_dp = enc_to_intel_dp(encoder);
1363 			if (intel_dp->compliance.test_type ==
1364 			    DP_TEST_LINK_EDID_READ)
1365 				seq_printf(m, "%lx",
1366 					   intel_dp->compliance.test_data.edid);
1367 			else if (intel_dp->compliance.test_type ==
1368 				 DP_TEST_LINK_VIDEO_PATTERN) {
1369 				seq_printf(m, "hdisplay: %d\n",
1370 					   intel_dp->compliance.test_data.hdisplay);
1371 				seq_printf(m, "vdisplay: %d\n",
1372 					   intel_dp->compliance.test_data.vdisplay);
1373 				seq_printf(m, "bpc: %u\n",
1374 					   intel_dp->compliance.test_data.bpc);
1375 			} else if (intel_dp->compliance.test_type ==
1376 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1377 				seq_printf(m, "pattern: %d\n",
1378 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1379 				seq_printf(m, "Number of lanes: %d\n",
1380 					   intel_dp->compliance.test_data.phytest.num_lanes);
1381 				seq_printf(m, "Link Rate: %d\n",
1382 					   intel_dp->compliance.test_data.phytest.link_rate);
1383 				seq_printf(m, "level: %02x\n",
1384 					   intel_dp->train_set[0]);
1385 			}
1386 		} else
1387 			seq_puts(m, "0");
1388 	}
1389 	drm_connector_list_iter_end(&conn_iter);
1390 
1391 	return 0;
1392 }
1393 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1394 
1395 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1396 {
1397 	struct drm_i915_private *dev_priv = m->private;
1398 	struct drm_device *dev = &dev_priv->drm;
1399 	struct drm_connector *connector;
1400 	struct drm_connector_list_iter conn_iter;
1401 	struct intel_dp *intel_dp;
1402 
1403 	drm_connector_list_iter_begin(dev, &conn_iter);
1404 	drm_for_each_connector_iter(connector, &conn_iter) {
1405 		struct intel_encoder *encoder;
1406 
1407 		if (connector->connector_type !=
1408 		    DRM_MODE_CONNECTOR_DisplayPort)
1409 			continue;
1410 
1411 		encoder = to_intel_encoder(connector->encoder);
1412 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1413 			continue;
1414 
1415 		if (encoder && connector->status == connector_status_connected) {
1416 			intel_dp = enc_to_intel_dp(encoder);
1417 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1418 		} else
1419 			seq_puts(m, "0");
1420 	}
1421 	drm_connector_list_iter_end(&conn_iter);
1422 
1423 	return 0;
1424 }
1425 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1426 
1427 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1428 {
1429 	struct drm_i915_private *dev_priv = m->private;
1430 	struct drm_device *dev = &dev_priv->drm;
1431 	int level;
1432 	int num_levels;
1433 
1434 	if (IS_CHERRYVIEW(dev_priv))
1435 		num_levels = 3;
1436 	else if (IS_VALLEYVIEW(dev_priv))
1437 		num_levels = 1;
1438 	else if (IS_G4X(dev_priv))
1439 		num_levels = 3;
1440 	else
1441 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1442 
1443 	drm_modeset_lock_all(dev);
1444 
1445 	for (level = 0; level < num_levels; level++) {
1446 		unsigned int latency = wm[level];
1447 
1448 		/*
1449 		 * - WM1+ latency values in 0.5us units
1450 		 * - latencies are in us on gen9/vlv/chv
1451 		 */
1452 		if (INTEL_GEN(dev_priv) >= 9 ||
1453 		    IS_VALLEYVIEW(dev_priv) ||
1454 		    IS_CHERRYVIEW(dev_priv) ||
1455 		    IS_G4X(dev_priv))
1456 			latency *= 10;
1457 		else if (level > 0)
1458 			latency *= 5;
1459 
1460 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1461 			   level, wm[level], latency / 10, latency % 10);
1462 	}
1463 
1464 	drm_modeset_unlock_all(dev);
1465 }
1466 
1467 static int pri_wm_latency_show(struct seq_file *m, void *data)
1468 {
1469 	struct drm_i915_private *dev_priv = m->private;
1470 	const u16 *latencies;
1471 
1472 	if (INTEL_GEN(dev_priv) >= 9)
1473 		latencies = dev_priv->wm.skl_latency;
1474 	else
1475 		latencies = dev_priv->wm.pri_latency;
1476 
1477 	wm_latency_show(m, latencies);
1478 
1479 	return 0;
1480 }
1481 
1482 static int spr_wm_latency_show(struct seq_file *m, void *data)
1483 {
1484 	struct drm_i915_private *dev_priv = m->private;
1485 	const u16 *latencies;
1486 
1487 	if (INTEL_GEN(dev_priv) >= 9)
1488 		latencies = dev_priv->wm.skl_latency;
1489 	else
1490 		latencies = dev_priv->wm.spr_latency;
1491 
1492 	wm_latency_show(m, latencies);
1493 
1494 	return 0;
1495 }
1496 
1497 static int cur_wm_latency_show(struct seq_file *m, void *data)
1498 {
1499 	struct drm_i915_private *dev_priv = m->private;
1500 	const u16 *latencies;
1501 
1502 	if (INTEL_GEN(dev_priv) >= 9)
1503 		latencies = dev_priv->wm.skl_latency;
1504 	else
1505 		latencies = dev_priv->wm.cur_latency;
1506 
1507 	wm_latency_show(m, latencies);
1508 
1509 	return 0;
1510 }
1511 
1512 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1513 {
1514 	struct drm_i915_private *dev_priv = inode->i_private;
1515 
1516 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1517 		return -ENODEV;
1518 
1519 	return single_open(file, pri_wm_latency_show, dev_priv);
1520 }
1521 
1522 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1523 {
1524 	struct drm_i915_private *dev_priv = inode->i_private;
1525 
1526 	if (HAS_GMCH(dev_priv))
1527 		return -ENODEV;
1528 
1529 	return single_open(file, spr_wm_latency_show, dev_priv);
1530 }
1531 
1532 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1533 {
1534 	struct drm_i915_private *dev_priv = inode->i_private;
1535 
1536 	if (HAS_GMCH(dev_priv))
1537 		return -ENODEV;
1538 
1539 	return single_open(file, cur_wm_latency_show, dev_priv);
1540 }
1541 
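/*
 * Common write handler for the watermark latency files: parse one value per
 * WM level (all levels must be supplied) and update the given table under
 * the modeset locks.
 */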
1542 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1543 				size_t len, loff_t *offp, u16 wm[8])
1544 {
1545 	struct seq_file *m = file->private_data;
1546 	struct drm_i915_private *dev_priv = m->private;
1547 	struct drm_device *dev = &dev_priv->drm;
1548 	u16 new[8] = { 0 };
1549 	int num_levels;
1550 	int level;
1551 	int ret;
1552 	char tmp[32];
1553 
1554 	if (IS_CHERRYVIEW(dev_priv))
1555 		num_levels = 3;
1556 	else if (IS_VALLEYVIEW(dev_priv))
1557 		num_levels = 1;
1558 	else if (IS_G4X(dev_priv))
1559 		num_levels = 3;
1560 	else
1561 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1562 
1563 	if (len >= sizeof(tmp))
1564 		return -EINVAL;
1565 
1566 	if (copy_from_user(tmp, ubuf, len))
1567 		return -EFAULT;
1568 
1569 	tmp[len] = '\0';
1570 
1571 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1572 		     &new[0], &new[1], &new[2], &new[3],
1573 		     &new[4], &new[5], &new[6], &new[7]);
1574 	if (ret != num_levels)
1575 		return -EINVAL;
1576 
1577 	drm_modeset_lock_all(dev);
1578 
1579 	for (level = 0; level < num_levels; level++)
1580 		wm[level] = new[level];
1581 
1582 	drm_modeset_unlock_all(dev);
1583 
1584 	return len;
1585 }
1586 
1587 
1588 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1589 				    size_t len, loff_t *offp)
1590 {
1591 	struct seq_file *m = file->private_data;
1592 	struct drm_i915_private *dev_priv = m->private;
1593 	u16 *latencies;
1594 
1595 	if (INTEL_GEN(dev_priv) >= 9)
1596 		latencies = dev_priv->wm.skl_latency;
1597 	else
1598 		latencies = dev_priv->wm.pri_latency;
1599 
1600 	return wm_latency_write(file, ubuf, len, offp, latencies);
1601 }
1602 
1603 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1604 				    size_t len, loff_t *offp)
1605 {
1606 	struct seq_file *m = file->private_data;
1607 	struct drm_i915_private *dev_priv = m->private;
1608 	u16 *latencies;
1609 
1610 	if (INTEL_GEN(dev_priv) >= 9)
1611 		latencies = dev_priv->wm.skl_latency;
1612 	else
1613 		latencies = dev_priv->wm.spr_latency;
1614 
1615 	return wm_latency_write(file, ubuf, len, offp, latencies);
1616 }
1617 
1618 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1619 				    size_t len, loff_t *offp)
1620 {
1621 	struct seq_file *m = file->private_data;
1622 	struct drm_i915_private *dev_priv = m->private;
1623 	u16 *latencies;
1624 
1625 	if (INTEL_GEN(dev_priv) >= 9)
1626 		latencies = dev_priv->wm.skl_latency;
1627 	else
1628 		latencies = dev_priv->wm.cur_latency;
1629 
1630 	return wm_latency_write(file, ubuf, len, offp, latencies);
1631 }
1632 
1633 static const struct file_operations i915_pri_wm_latency_fops = {
1634 	.owner = THIS_MODULE,
1635 	.open = pri_wm_latency_open,
1636 	.read = seq_read,
1637 	.llseek = seq_lseek,
1638 	.release = single_release,
1639 	.write = pri_wm_latency_write
1640 };
1641 
1642 static const struct file_operations i915_spr_wm_latency_fops = {
1643 	.owner = THIS_MODULE,
1644 	.open = spr_wm_latency_open,
1645 	.read = seq_read,
1646 	.llseek = seq_lseek,
1647 	.release = single_release,
1648 	.write = spr_wm_latency_write
1649 };
1650 
1651 static const struct file_operations i915_cur_wm_latency_fops = {
1652 	.owner = THIS_MODULE,
1653 	.open = cur_wm_latency_open,
1654 	.read = seq_read,
1655 	.llseek = seq_lseek,
1656 	.release = single_release,
1657 	.write = cur_wm_latency_write
1658 };
1659 
1660 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1661 {
1662 	struct drm_i915_private *dev_priv = m->private;
1663 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1664 
1665 	/* Synchronize with everything first in case there's been an HPD
1666 	 * storm, but we haven't finished handling it in the kernel yet
1667 	 */
1668 	intel_synchronize_irq(dev_priv);
1669 	flush_work(&dev_priv->hotplug.dig_port_work);
1670 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1671 
1672 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1673 	seq_printf(m, "Detected: %s\n",
1674 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1675 
1676 	return 0;
1677 }
1678 
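/*
 * Accepts either "reset" (restore the default threshold) or a new HPD storm
 * threshold; the per-pin interrupt counters are cleared in both cases.
 */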
1679 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1680 					const char __user *ubuf, size_t len,
1681 					loff_t *offp)
1682 {
1683 	struct seq_file *m = file->private_data;
1684 	struct drm_i915_private *dev_priv = m->private;
1685 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1686 	unsigned int new_threshold;
1687 	int i;
1688 	char *newline;
1689 	char tmp[16];
1690 
1691 	if (len >= sizeof(tmp))
1692 		return -EINVAL;
1693 
1694 	if (copy_from_user(tmp, ubuf, len))
1695 		return -EFAULT;
1696 
1697 	tmp[len] = '\0';
1698 
1699 	/* Strip newline, if any */
1700 	newline = strchr(tmp, '\n');
1701 	if (newline)
1702 		*newline = '\0';
1703 
1704 	if (strcmp(tmp, "reset") == 0)
1705 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1706 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1707 		return -EINVAL;
1708 
1709 	if (new_threshold > 0)
1710 		drm_dbg_kms(&dev_priv->drm,
1711 			    "Setting HPD storm detection threshold to %d\n",
1712 			    new_threshold);
1713 	else
1714 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1715 
1716 	spin_lock_irq(&dev_priv->irq_lock);
1717 	hotplug->hpd_storm_threshold = new_threshold;
1718 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1719 	for_each_hpd_pin(i)
1720 		hotplug->stats[i].count = 0;
1721 	spin_unlock_irq(&dev_priv->irq_lock);
1722 
1723 	/* Re-enable hpd immediately if we were in an irq storm */
1724 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1725 
1726 	return len;
1727 }
1728 
1729 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1730 {
1731 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1732 }
1733 
1734 static const struct file_operations i915_hpd_storm_ctl_fops = {
1735 	.owner = THIS_MODULE,
1736 	.open = i915_hpd_storm_ctl_open,
1737 	.read = seq_read,
1738 	.llseek = seq_lseek,
1739 	.release = single_release,
1740 	.write = i915_hpd_storm_ctl_write
1741 };
1742 
1743 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1744 {
1745 	struct drm_i915_private *dev_priv = m->private;
1746 
1747 	seq_printf(m, "Enabled: %s\n",
1748 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1749 
1750 	return 0;
1751 }
1752 
1753 static int
1754 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1755 {
1756 	return single_open(file, i915_hpd_short_storm_ctl_show,
1757 			   inode->i_private);
1758 }
1759 
1760 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1761 					      const char __user *ubuf,
1762 					      size_t len, loff_t *offp)
1763 {
1764 	struct seq_file *m = file->private_data;
1765 	struct drm_i915_private *dev_priv = m->private;
1766 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1767 	char *newline;
1768 	char tmp[16];
1769 	int i;
1770 	bool new_state;
1771 
1772 	if (len >= sizeof(tmp))
1773 		return -EINVAL;
1774 
1775 	if (copy_from_user(tmp, ubuf, len))
1776 		return -EFAULT;
1777 
1778 	tmp[len] = '\0';
1779 
1780 	/* Strip newline, if any */
1781 	newline = strchr(tmp, '\n');
1782 	if (newline)
1783 		*newline = '\0';
1784 
1785 	/* Reset to the "default" state for this system */
1786 	if (strcmp(tmp, "reset") == 0)
1787 		new_state = !HAS_DP_MST(dev_priv);
1788 	else if (kstrtobool(tmp, &new_state) != 0)
1789 		return -EINVAL;
1790 
1791 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1792 		    new_state ? "En" : "Dis");
1793 
1794 	spin_lock_irq(&dev_priv->irq_lock);
1795 	hotplug->hpd_short_storm_enabled = new_state;
1796 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1797 	for_each_hpd_pin(i)
1798 		hotplug->stats[i].count = 0;
1799 	spin_unlock_irq(&dev_priv->irq_lock);
1800 
1801 	/* Re-enable hpd immediately if we were in an irq storm */
1802 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1803 
1804 	return len;
1805 }
1806 
1807 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1808 	.owner = THIS_MODULE,
1809 	.open = i915_hpd_short_storm_ctl_open,
1810 	.read = seq_read,
1811 	.llseek = seq_lseek,
1812 	.release = single_release,
1813 	.write = i915_hpd_short_storm_ctl_write,
1814 };
1815 
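/*
 * i915_drrs_ctl: for every active CRTC with DRRS, wait for any pending
 * commit to land, then manually enable (non-zero) or disable (zero) eDP
 * DRRS on the attached eDP connectors. Typical usage (debugfs path may
 * vary with the DRM minor):
 *   echo 0 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */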
1816 static int i915_drrs_ctl_set(void *data, u64 val)
1817 {
1818 	struct drm_i915_private *dev_priv = data;
1819 	struct drm_device *dev = &dev_priv->drm;
1820 	struct intel_crtc *crtc;
1821 
1822 	if (INTEL_GEN(dev_priv) < 7)
1823 		return -ENODEV;
1824 
1825 	for_each_intel_crtc(dev, crtc) {
1826 		struct drm_connector_list_iter conn_iter;
1827 		struct intel_crtc_state *crtc_state;
1828 		struct drm_connector *connector;
1829 		struct drm_crtc_commit *commit;
1830 		int ret;
1831 
1832 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1833 		if (ret)
1834 			return ret;
1835 
1836 		crtc_state = to_intel_crtc_state(crtc->base.state);
1837 
1838 		if (!crtc_state->hw.active ||
1839 		    !crtc_state->has_drrs)
1840 			goto out;
1841 
1842 		commit = crtc_state->uapi.commit;
1843 		if (commit) {
1844 			ret = wait_for_completion_interruptible(&commit->hw_done);
1845 			if (ret)
1846 				goto out;
1847 		}
1848 
1849 		drm_connector_list_iter_begin(dev, &conn_iter);
1850 		drm_for_each_connector_iter(connector, &conn_iter) {
1851 			struct intel_encoder *encoder;
1852 			struct intel_dp *intel_dp;
1853 
1854 			if (!(crtc_state->uapi.connector_mask &
1855 			      drm_connector_mask(connector)))
1856 				continue;
1857 
1858 			encoder = intel_attached_encoder(to_intel_connector(connector));
1859 			if (encoder->type != INTEL_OUTPUT_EDP)
1860 				continue;
1861 
1862 			drm_dbg(&dev_priv->drm,
1863 				"Manually %sabling DRRS (val %llu)\n",
1864 				val ? "en" : "dis", val);
1865 
1866 			intel_dp = enc_to_intel_dp(encoder);
1867 			if (val)
1868 				intel_edp_drrs_enable(intel_dp,
1869 						      crtc_state);
1870 			else
1871 				intel_edp_drrs_disable(intel_dp,
1872 						       crtc_state);
1873 		}
1874 		drm_connector_list_iter_end(&conn_iter);
1875 
1876 out:
1877 		drm_modeset_unlock(&crtc->base.mutex);
1878 		if (ret)
1879 			return ret;
1880 	}
1881 
1882 	return 0;
1883 }
1884 
1885 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
1886 
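/*
 * Writing a truthy value to i915_fifo_underrun_reset waits for any pending
 * commit on each pipe, re-arms FIFO underrun reporting on the active ones,
 * and finally clears the FBC underrun state via intel_fbc_reset_underrun().
 */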
1887 static ssize_t
1888 i915_fifo_underrun_reset_write(struct file *filp,
1889 			       const char __user *ubuf,
1890 			       size_t cnt, loff_t *ppos)
1891 {
1892 	struct drm_i915_private *dev_priv = filp->private_data;
1893 	struct intel_crtc *intel_crtc;
1894 	struct drm_device *dev = &dev_priv->drm;
1895 	int ret;
1896 	bool reset;
1897 
1898 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
1899 	if (ret)
1900 		return ret;
1901 
1902 	if (!reset)
1903 		return cnt;
1904 
1905 	for_each_intel_crtc(dev, intel_crtc) {
1906 		struct drm_crtc_commit *commit;
1907 		struct intel_crtc_state *crtc_state;
1908 
1909 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
1910 		if (ret)
1911 			return ret;
1912 
1913 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
1914 		commit = crtc_state->uapi.commit;
1915 		if (commit) {
1916 			ret = wait_for_completion_interruptible(&commit->hw_done);
1917 			if (!ret)
1918 				ret = wait_for_completion_interruptible(&commit->flip_done);
1919 		}
1920 
1921 		if (!ret && crtc_state->hw.active) {
1922 			drm_dbg_kms(&dev_priv->drm,
1923 				    "Re-arming FIFO underruns on pipe %c\n",
1924 				    pipe_name(intel_crtc->pipe));
1925 
1926 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
1927 		}
1928 
1929 		drm_modeset_unlock(&intel_crtc->base.mutex);
1930 
1931 		if (ret)
1932 			return ret;
1933 	}
1934 
1935 	ret = intel_fbc_reset_underrun(dev_priv);
1936 	if (ret)
1937 		return ret;
1938 
1939 	return cnt;
1940 }
1941 
1942 static const struct file_operations i915_fifo_underrun_reset_ops = {
1943 	.owner = THIS_MODULE,
1944 	.open = simple_open,
1945 	.write = i915_fifo_underrun_reset_write,
1946 	.llseek = default_llseek,
1947 };
1948 
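/*
 * Read-only status nodes and writable control nodes registered under the
 * DRM minor's debugfs directory (typically /sys/kernel/debug/dri/<minor>)
 * by intel_display_debugfs_register().
 */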
1949 static const struct drm_info_list intel_display_debugfs_list[] = {
1950 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
1951 	{"i915_fbc_status", i915_fbc_status, 0},
1952 	{"i915_ips_status", i915_ips_status, 0},
1953 	{"i915_sr_status", i915_sr_status, 0},
1954 	{"i915_opregion", i915_opregion, 0},
1955 	{"i915_vbt", i915_vbt, 0},
1956 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1957 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
1958 	{"i915_power_domain_info", i915_power_domain_info, 0},
1959 	{"i915_dmc_info", i915_dmc_info, 0},
1960 	{"i915_display_info", i915_display_info, 0},
1961 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
1962 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
1963 	{"i915_ddb_info", i915_ddb_info, 0},
1964 	{"i915_drrs_status", i915_drrs_status, 0},
1965 	{"i915_lpsp_status", i915_lpsp_status, 0},
1966 };
1967 
1968 static const struct {
1969 	const char *name;
1970 	const struct file_operations *fops;
1971 } intel_display_debugfs_files[] = {
1972 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
1973 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
1974 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
1975 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
1976 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
1977 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
1978 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
1979 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
1980 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
1981 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
1982 	{"i915_ipc_status", &i915_ipc_status_fops},
1983 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
1984 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
1985 };
1986 
1987 int intel_display_debugfs_register(struct drm_i915_private *i915)
1988 {
1989 	struct drm_minor *minor = i915->drm.primary;
1990 	int i;
1991 
1992 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
1993 		debugfs_create_file(intel_display_debugfs_files[i].name,
1994 				    0644,
1995 				    minor->debugfs_root,
1996 				    to_i915(minor->dev),
1997 				    intel_display_debugfs_files[i].fops);
1998 	}
1999 
2000 	return drm_debugfs_create_files(intel_display_debugfs_list,
2001 					ARRAY_SIZE(intel_display_debugfs_list),
2002 					minor->debugfs_root, minor);
2003 }
2004 
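/*
 * Per-connector eDP node reporting the panel power sequencing and backlight
 * on/off delays currently in use; exposed as "i915_panel_timings" in the
 * connector's debugfs directory.
 */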
2005 static int i915_panel_show(struct seq_file *m, void *data)
2006 {
2007 	struct drm_connector *connector = m->private;
2008 	struct intel_dp *intel_dp =
2009 		intel_attached_dp(to_intel_connector(connector));
2010 
2011 	if (connector->status != connector_status_connected)
2012 		return -ENODEV;
2013 
2014 	seq_printf(m, "Panel power up delay: %d\n",
2015 		   intel_dp->panel_power_up_delay);
2016 	seq_printf(m, "Panel power down delay: %d\n",
2017 		   intel_dp->panel_power_down_delay);
2018 	seq_printf(m, "Backlight on delay: %d\n",
2019 		   intel_dp->backlight_on_delay);
2020 	seq_printf(m, "Backlight off delay: %d\n",
2021 		   intel_dp->backlight_off_delay);
2022 
2023 	return 0;
2024 }
2025 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2026 
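/*
 * Per-connector node reporting the HDCP version supported by the attached
 * sink; only valid for connected connectors that provide an HDCP shim.
 */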
2027 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2028 {
2029 	struct drm_connector *connector = m->private;
2030 	struct intel_connector *intel_connector = to_intel_connector(connector);
2031 
2032 	if (connector->status != connector_status_connected)
2033 		return -ENODEV;
2034 
2035 	/* The HDCP shim is only set for connectors that support HDCP */
2036 	if (!intel_connector->hdcp.shim)
2037 		return -EINVAL;
2038 
2039 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2040 		   connector->base.id);
2041 	intel_hdcp_info(m, intel_connector);
2042 
2043 	return 0;
2044 }
2045 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2046 
2047 #define LPSP_CAPABLE(COND) ((COND) ? seq_puts(m, "LPSP: capable\n") : \
2048 				seq_puts(m, "LPSP: incapable\n"))
2049 
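/*
 * Report whether the connector can be driven with LPSP (low power single
 * pipe), which depends on the platform generation, the port and the
 * connector type.
 */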
2050 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2051 {
2052 	struct drm_connector *connector = m->private;
2053 	struct intel_encoder *encoder =
2054 			intel_attached_encoder(to_intel_connector(connector));
2055 	struct drm_i915_private *i915 = to_i915(connector->dev);
2056 
2057 	if (connector->status != connector_status_connected)
2058 		return -ENODEV;
2059 
2060 	switch (INTEL_GEN(i915)) {
2061 	case 12:
2062 		/*
2063 		 * TGL can drive LPSP on ports up to DDI_C, but no TGL SKU
2064 		 * has DDI_C physically connected, and the driver does not
2065 		 * initialize the DDI_C port on gen12 either.
2066 		 */
2067 		LPSP_CAPABLE(encoder->port <= PORT_B);
2068 		break;
2069 	case 11:
2070 		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2071 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2072 		break;
2073 	case 10:
2074 	case 9:
2075 		LPSP_CAPABLE(encoder->port == PORT_A &&
2076 			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2077 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
2078 			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2079 		break;
2080 	default:
2081 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2082 			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2083 	}
2084 
2085 	return 0;
2086 }
2087 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2088 
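/*
 * Read handler for i915_dsc_fec_support: under the connector and CRTC
 * modeset locks (retrying on -EDEADLK), report whether DSC is currently
 * enabled, whether the sink supports DSC, the force-DSC debug flag and,
 * for non-eDP sinks, FEC support.
 */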
2089 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2090 {
2091 	struct drm_connector *connector = m->private;
2092 	struct drm_device *dev = connector->dev;
2093 	struct drm_crtc *crtc;
2094 	struct intel_dp *intel_dp;
2095 	struct drm_modeset_acquire_ctx ctx;
2096 	struct intel_crtc_state *crtc_state = NULL;
2097 	int ret = 0;
2098 	bool try_again = false;
2099 
2100 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2101 
2102 	do {
2103 		try_again = false;
2104 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2105 				       &ctx);
2106 		if (ret) {
2107 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2108 				try_again = true;
2109 				continue;
2110 			}
2111 			break;
2112 		}
2113 		crtc = connector->state->crtc;
2114 		if (connector->status != connector_status_connected || !crtc) {
2115 			ret = -ENODEV;
2116 			break;
2117 		}
2118 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2119 		if (ret == -EDEADLK) {
2120 			ret = drm_modeset_backoff(&ctx);
2121 			if (!ret) {
2122 				try_again = true;
2123 				continue;
2124 			}
2125 			break;
2126 		} else if (ret) {
2127 			break;
2128 		}
2129 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2130 		crtc_state = to_intel_crtc_state(crtc->state);
2131 		seq_printf(m, "DSC_Enabled: %s\n",
2132 			   yesno(crtc_state->dsc.compression_enable));
2133 		seq_printf(m, "DSC_Sink_Support: %s\n",
2134 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2135 		seq_printf(m, "Force_DSC_Enable: %s\n",
2136 			   yesno(intel_dp->force_dsc_en));
2137 		if (!intel_dp_is_edp(intel_dp))
2138 			seq_printf(m, "FEC_Sink_Support: %s\n",
2139 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2140 	} while (try_again);
2141 
2142 	drm_modeset_drop_locks(&ctx);
2143 	drm_modeset_acquire_fini(&ctx);
2144 
2145 	return ret;
2146 }
2147 
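/*
 * Write handler for i915_dsc_fec_support: parse a boolean and latch it in
 * intel_dp->force_dsc_en so that DSC is forced on when this connector's
 * state is next computed. Typical usage (connector directory name varies):
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */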
2148 static ssize_t i915_dsc_fec_support_write(struct file *file,
2149 					  const char __user *ubuf,
2150 					  size_t len, loff_t *offp)
2151 {
2152 	bool dsc_enable = false;
2153 	int ret;
2154 	struct drm_connector *connector =
2155 		((struct seq_file *)file->private_data)->private;
2156 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2157 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2158 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2159 
2160 	if (len == 0)
2161 		return 0;
2162 
2163 	drm_dbg(&i915->drm,
2164 		"Copied %zu bytes from user to force DSC\n", len);
2165 
2166 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2167 	if (ret < 0)
2168 		return ret;
2169 
2170 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2171 		(dsc_enable) ? "true" : "false");
2172 	intel_dp->force_dsc_en = dsc_enable;
2173 
2174 	*offp += len;
2175 	return len;
2176 }
2177 
2178 static int i915_dsc_fec_support_open(struct inode *inode,
2179 				     struct file *file)
2180 {
2181 	return single_open(file, i915_dsc_fec_support_show,
2182 			   inode->i_private);
2183 }
2184 
2185 static const struct file_operations i915_dsc_fec_support_fops = {
2186 	.owner = THIS_MODULE,
2187 	.open = i915_dsc_fec_support_open,
2188 	.read = seq_read,
2189 	.llseek = seq_lseek,
2190 	.release = single_release,
2191 	.write = i915_dsc_fec_support_write
2192 };
2193 
2194 /**
2195  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2196  * @connector: pointer to a registered drm_connector
2197  *
2198  * Cleanup will be done by drm_connector_unregister() through a call to
2199  * drm_debugfs_connector_remove().
2200  *
2201  * Returns 0 on success, a negative error code on failure.
2202  */
2203 int intel_connector_debugfs_add(struct drm_connector *connector)
2204 {
2205 	struct dentry *root = connector->debugfs_entry;
2206 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2207 
2208 	/* The connector must have been registered beforehand. */
2209 	if (!root)
2210 		return -ENODEV;
2211 
2212 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2213 		debugfs_create_file("i915_panel_timings", 0444, root,
2214 				    connector, &i915_panel_fops);
2215 		debugfs_create_file("i915_psr_sink_status", 0444, root,
2216 				    connector, &i915_psr_sink_status_fops);
2217 	}
2218 
2219 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2220 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2221 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2222 		debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
2223 				    connector, &i915_hdcp_sink_capability_fops);
2224 	}
2225 
2226 	if (INTEL_GEN(dev_priv) >= 10 &&
2227 	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2228 	      !to_intel_connector(connector)->mst_port) ||
2229 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2230 		debugfs_create_file("i915_dsc_fec_support", 0644, root,
2231 				    connector, &i915_dsc_fec_support_fops);
2232 
2233 	/* Legacy panel types never support LPSP on any platform */
2234 	if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2235 	     IS_BROADWELL(dev_priv)) &&
2236 	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2237 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2238 	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2239 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2240 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2241 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2242 				    connector, &i915_lpsp_capability_fops);
2243 
2244 	return 0;
2245 }
2246