1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 #include <linux/kernel.h>
26 
27 #include <drm/drm_probe_helper.h>
28 
29 #include "i915_drv.h"
30 #include "i915_irq.h"
31 #include "intel_connector.h"
32 #include "intel_display_power.h"
33 #include "intel_display_rpm.h"
34 #include "intel_display_types.h"
35 #include "intel_hdcp.h"
36 #include "intel_hotplug.h"
37 #include "intel_hotplug_irq.h"
38 
39 /**
40  * DOC: Hotplug
41  *
42  * Simply put, hotplug occurs when a display is connected to or disconnected
43  * from the system. However, there may be adapters and docking stations and
44  * Display Port short pulses and MST devices involved, complicating matters.
45  *
46  * Hotplug in i915 is handled at many different levels of abstraction.
47  *
48  * The platform dependent interrupt handling code in i915_irq.c enables,
49  * disables, and does preliminary handling of the interrupts. The interrupt
50  * handlers gather the hotplug detect (HPD) information from relevant registers
51  * into a platform independent mask of hotplug pins that have fired.
52  *
53  * The platform independent interrupt handler intel_hpd_irq_handler() in
54  * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
55  * further processing to appropriate bottom halves (Display Port specific and
56  * regular hotplug).
57  *
58  * The Display Port work function i915_digport_work_func() calls into
59  * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
60  * pulses, with failures and non-MST long pulses triggering regular hotplug
61  * processing on the connector.
62  *
63  * The regular hotplug work function i915_hotplug_work_func() calls connector
64  * detect hooks, and, if connector status changes, triggers sending of hotplug
65  * uevent to userspace via drm_kms_helper_hotplug_event().
66  *
67  * Finally, userspace is responsible for triggering a modeset upon receiving
68  * the hotplug uevent, disabling or enabling the crtc as needed.
69  *
70  * The hotplug interrupt storm detection and mitigation code keeps track of the
71  * number of interrupts per hotplug pin over a period of time, and if the number
72  * of interrupts exceeds a certain threshold, the interrupt is disabled for a
73  * while before being re-enabled. The intention is to mitigate issues arising
74  * from broken hardware triggering massive amounts of interrupts and grinding
75  * the system to a halt.
76  *
77  * The current implementation expects that an HPD interrupt storm will not be
78  * seen while a DisplayPort sink is connected. Hence, on platforms whose DP
79  * callback is handled by i915_digport_work_func(), re-enabling of HPD is not
80  * performed (it was never expected to be disabled in the first place ;) ).
81  * This is specific to DP sinks handled by that routine; any other display,
82  * such as HDMI or DVI, enabled on the same port will have proper logic, since
83  * it will use i915_hotplug_work_func(), where this logic is handled.
84  */
85 
86 /**
87  * intel_hpd_pin_default - return default pin associated with a certain port.
88  * @port: the hpd port to get associated pin
89  *
90  * It is only valid for and used by digital port encoders.
91  *
92  * Return the pin that is associated with @port.
93  */
94 enum hpd_pin intel_hpd_pin_default(enum port port)
95 {
96 	return HPD_PORT_A + port - PORT_A;
97 }
98 
99 /* Threshold == 5 for long IRQs, 50 for short */
100 #define HPD_STORM_DEFAULT_THRESHOLD	50
101 
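/*
 * Storm detection period, storm re-enable delay and hotplug retry delay, all
 * in milliseconds (converted with msecs_to_jiffies() at the points of use).
 */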
102 #define HPD_STORM_DETECT_PERIOD		1000
103 #define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
104 #define HPD_RETRY_DELAY			1000
105 
106 static enum hpd_pin
107 intel_connector_hpd_pin(struct intel_connector *connector)
108 {
109 	struct intel_encoder *encoder = intel_attached_encoder(connector);
110 
111 	/*
112 	 * MST connectors get their encoder attached dynamically
113 	 * so we need to make sure we have an encoder here. But since
114 	 * MST encoders have their hpd_pin set to HPD_NONE we don't
115 	 * have to special case them beyond that.
116 	 */
117 	return encoder ? encoder->hpd_pin : HPD_NONE;
118 }
119 
120 /**
121  * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
122  * @display: display device
123  * @pin: the pin to gather stats on
124  * @long_hpd: whether the HPD IRQ was long or short
125  *
126  * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
127  * storms. Only the pin-specific stats and state are changed; the caller is
128  * responsible for further action.
129  *
130  * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
131  * stored in @display->hotplug.hpd_storm_threshold which defaults to
132  * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
133  * short IRQs count as +1. If this threshold is exceeded, it's considered an
134  * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
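 * For example, with the default threshold of 50, the sixth long IRQ within the
 * detection period (6 * 10 = 60 > 50) or the 51st short IRQ is treated as a
 * storm.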
135  *
136  * By default, most systems will only count long IRQs towards
137  * &display->hotplug.hpd_storm_threshold. However, some older systems also
138  * suffer from short IRQ storms and must also track these. Because short IRQ
139  * storms are naturally caused by sideband interactions with DP MST devices,
140  * short IRQ detection is only enabled for systems without DP MST support.
141  * Systems which are new enough to support DP MST are far less likely to
142  * suffer from IRQ storms at all, so this is fine.
143  *
144  * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
145  * and should only be adjusted for automated hotplug testing.
146  *
147  * Return true if an IRQ storm was detected on @pin.
148  */
149 static bool intel_hpd_irq_storm_detect(struct intel_display *display,
150 				       enum hpd_pin pin, bool long_hpd)
151 {
152 	struct intel_hotplug *hpd = &display->hotplug;
153 	unsigned long start = hpd->stats[pin].last_jiffies;
154 	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
155 	const int increment = long_hpd ? 10 : 1;
156 	const int threshold = hpd->hpd_storm_threshold;
157 	bool storm = false;
158 
159 	if (!threshold ||
160 	    (!long_hpd && !display->hotplug.hpd_short_storm_enabled))
161 		return false;
162 
163 	if (!time_in_range(jiffies, start, end)) {
164 		hpd->stats[pin].last_jiffies = jiffies;
165 		hpd->stats[pin].count = 0;
166 	}
167 
168 	hpd->stats[pin].count += increment;
169 	if (hpd->stats[pin].count > threshold) {
170 		hpd->stats[pin].state = HPD_MARK_DISABLED;
171 		drm_dbg_kms(display->drm,
172 			    "HPD interrupt storm detected on PIN %d\n", pin);
173 		storm = true;
174 	} else {
175 		drm_dbg_kms(display->drm,
176 			    "Received HPD interrupt on PIN %d - cnt: %d\n",
177 			      pin,
178 			      hpd->stats[pin].count);
179 	}
180 
181 	return storm;
182 }
183 
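/*
 * Hotplug detection work may only be scheduled between
 * intel_hpd_enable_detection_work() and intel_hpd_disable_detection_work();
 * the queueing helpers below return false without scheduling anything
 * otherwise.
 */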
184 static bool detection_work_enabled(struct intel_display *display)
185 {
186 	lockdep_assert_held(&display->irq.lock);
187 
188 	return display->hotplug.detection_work_enabled;
189 }
190 
191 static bool
192 mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
193 {
194 	struct drm_i915_private *i915 = to_i915(display->drm);
195 
196 	lockdep_assert_held(&display->irq.lock);
197 
198 	if (!detection_work_enabled(display))
199 		return false;
200 
201 	return mod_delayed_work(i915->unordered_wq, work, delay);
202 }
203 
204 static bool
205 queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
206 {
207 	struct drm_i915_private *i915 = to_i915(display->drm);
208 
209 	lockdep_assert_held(&display->irq.lock);
210 
211 	if (!detection_work_enabled(display))
212 		return false;
213 
214 	return queue_delayed_work(i915->unordered_wq, work, delay);
215 }
216 
217 static bool
218 queue_detection_work(struct intel_display *display, struct work_struct *work)
219 {
220 	struct drm_i915_private *i915 = to_i915(display->drm);
221 
222 	lockdep_assert_held(&display->irq.lock);
223 
224 	if (!detection_work_enabled(display))
225 		return false;
226 
227 	return queue_work(i915->unordered_wq, work);
228 }
229 
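/*
 * Switch connectors whose HPD pin hit an IRQ storm from hotplug detection to
 * polling, and schedule the work that re-enables HPD after
 * HPD_STORM_REENABLE_DELAY.
 */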
230 static void
231 intel_hpd_irq_storm_switch_to_polling(struct intel_display *display)
232 {
233 	struct drm_connector_list_iter conn_iter;
234 	struct intel_connector *connector;
235 	bool hpd_disabled = false;
236 
237 	lockdep_assert_held(&display->irq.lock);
238 
239 	drm_connector_list_iter_begin(display->drm, &conn_iter);
240 	for_each_intel_connector_iter(connector, &conn_iter) {
241 		enum hpd_pin pin;
242 
243 		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
244 			continue;
245 
246 		pin = intel_connector_hpd_pin(connector);
247 		if (pin == HPD_NONE ||
248 		    display->hotplug.stats[pin].state != HPD_MARK_DISABLED)
249 			continue;
250 
251 		drm_info(display->drm,
252 			 "HPD interrupt storm detected on connector %s: "
253 			 "switching from hotplug detection to polling\n",
254 			 connector->base.name);
255 
256 		display->hotplug.stats[pin].state = HPD_DISABLED;
257 		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
258 			DRM_CONNECTOR_POLL_DISCONNECT;
259 		hpd_disabled = true;
260 	}
261 	drm_connector_list_iter_end(&conn_iter);
262 
263 	/* Enable polling and queue hotplug re-enabling. */
264 	if (hpd_disabled) {
265 		drm_kms_helper_poll_reschedule(display->drm);
266 		mod_delayed_detection_work(display,
267 					   &display->hotplug.reenable_work,
268 					   msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
269 	}
270 }
271 
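/*
 * Undo the polling fallback for pins that were disabled due to an IRQ storm:
 * restore each affected connector's original ->polled mode and re-enable the
 * HPD interrupts.
 */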
272 static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
273 {
274 	struct intel_display *display =
275 		container_of(work, typeof(*display), hotplug.reenable_work.work);
276 	struct drm_connector_list_iter conn_iter;
277 	struct intel_connector *connector;
278 	struct ref_tracker *wakeref;
279 	enum hpd_pin pin;
280 
281 	wakeref = intel_display_rpm_get(display);
282 
283 	spin_lock_irq(&display->irq.lock);
284 
285 	drm_connector_list_iter_begin(display->drm, &conn_iter);
286 	for_each_intel_connector_iter(connector, &conn_iter) {
287 		pin = intel_connector_hpd_pin(connector);
288 		if (pin == HPD_NONE ||
289 		    display->hotplug.stats[pin].state != HPD_DISABLED)
290 			continue;
291 
292 		if (connector->base.polled != connector->polled)
293 			drm_dbg(display->drm,
294 				"Reenabling HPD on connector %s\n",
295 				connector->base.name);
296 		connector->base.polled = connector->polled;
297 	}
298 	drm_connector_list_iter_end(&conn_iter);
299 
300 	for_each_hpd_pin(pin) {
301 		if (display->hotplug.stats[pin].state == HPD_DISABLED)
302 			display->hotplug.stats[pin].state = HPD_ENABLED;
303 	}
304 
305 	intel_hpd_irq_setup(display);
306 
307 	spin_unlock_irq(&display->irq.lock);
308 
309 	intel_display_rpm_put(display, wakeref);
310 }
311 
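/*
 * Run the connector's detect hook via drm_helper_probe_detect() and report
 * whether the connector changed, as tracked by the epoch counter.
 */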
312 static enum intel_hotplug_state
313 intel_hotplug_detect_connector(struct intel_connector *connector)
314 {
315 	struct drm_device *dev = connector->base.dev;
316 	enum drm_connector_status old_status;
317 	u64 old_epoch_counter;
318 	int status;
319 	bool ret = false;
320 
321 	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
322 	old_status = connector->base.status;
323 	old_epoch_counter = connector->base.epoch_counter;
324 
325 	status = drm_helper_probe_detect(&connector->base, NULL, false);
326 	if (!connector->base.force)
327 		connector->base.status = status;
328 
329 	if (old_epoch_counter != connector->base.epoch_counter)
330 		ret = true;
331 
332 	if (ret) {
333 		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
334 			    connector->base.base.id,
335 			    connector->base.name,
336 			    drm_get_connector_status_name(old_status),
337 			    drm_get_connector_status_name(connector->base.status),
338 			    old_epoch_counter,
339 			    connector->base.epoch_counter);
340 		return INTEL_HOTPLUG_CHANGED;
341 	}
342 	return INTEL_HOTPLUG_UNCHANGED;
343 }
344 
345 enum intel_hotplug_state
346 intel_encoder_hotplug(struct intel_encoder *encoder,
347 		      struct intel_connector *connector)
348 {
349 	return intel_hotplug_detect_connector(connector);
350 }
351 
352 static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
353 {
354 	return intel_encoder_is_dig_port(encoder) &&
355 		enc_to_dig_port(encoder)->hpd_pulse != NULL;
356 }
357 
358 static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin)
359 {
360 	struct intel_encoder *encoder;
361 
362 	for_each_intel_encoder(display->drm, encoder) {
363 		if (encoder->hpd_pin != pin)
364 			continue;
365 
366 		if (intel_encoder_has_hpd_pulse(encoder))
367 			return true;
368 	}
369 
370 	return false;
371 }
372 
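/*
 * An HPD pin is blocked while at least one intel_hpd_block() call for it is
 * outstanding.
 */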
373 static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin)
374 {
375 	lockdep_assert_held(&display->irq.lock);
376 
377 	return display->hotplug.stats[pin].blocked_count;
378 }
379 
380 static u32 get_blocked_hpd_pin_mask(struct intel_display *display)
381 {
382 	enum hpd_pin pin;
383 	u32 hpd_pin_mask = 0;
384 
385 	for_each_hpd_pin(pin) {
386 		if (hpd_pin_is_blocked(display, pin))
387 			hpd_pin_mask |= BIT(pin);
388 	}
389 
390 	return hpd_pin_mask;
391 }
392 
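/*
 * Bottom half for digital ports with an ->hpd_pulse() hook: dispatch any
 * pending long/short HPD pulses to the encoders, falling back to the regular
 * hotplug work for pulses the hook did not handle (IRQ_NONE).
 */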
393 static void i915_digport_work_func(struct work_struct *work)
394 {
395 	struct intel_display *display =
396 		container_of(work, struct intel_display, hotplug.dig_port_work);
397 	struct intel_hotplug *hotplug = &display->hotplug;
398 	u32 long_hpd_pin_mask, short_hpd_pin_mask;
399 	struct intel_encoder *encoder;
400 	u32 blocked_hpd_pin_mask;
401 	u32 old_bits = 0;
402 
403 	spin_lock_irq(&display->irq.lock);
404 
405 	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
406 	long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask;
407 	hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask;
408 	short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask;
409 	hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask;
410 
411 	spin_unlock_irq(&display->irq.lock);
412 
413 	for_each_intel_encoder(display->drm, encoder) {
414 		struct intel_digital_port *dig_port;
415 		enum hpd_pin pin = encoder->hpd_pin;
416 		bool long_hpd, short_hpd;
417 		enum irqreturn ret;
418 
419 		if (!intel_encoder_has_hpd_pulse(encoder))
420 			continue;
421 
422 		long_hpd = long_hpd_pin_mask & BIT(pin);
423 		short_hpd = short_hpd_pin_mask & BIT(pin);
424 
425 		if (!long_hpd && !short_hpd)
426 			continue;
427 
428 		dig_port = enc_to_dig_port(encoder);
429 
430 		ret = dig_port->hpd_pulse(dig_port, long_hpd);
431 		if (ret == IRQ_NONE) {
432 			/* fall back to old school hpd */
433 			old_bits |= BIT(pin);
434 		}
435 	}
436 
437 	if (old_bits) {
438 		spin_lock_irq(&display->irq.lock);
439 		display->hotplug.event_bits |= old_bits;
440 		queue_delayed_detection_work(display,
441 					     &display->hotplug.hotplug_work, 0);
442 		spin_unlock_irq(&display->irq.lock);
443 	}
444 }
445 
446 /**
447  * intel_hpd_trigger_irq - trigger an hpd irq event for a port
448  * @dig_port: digital port
449  *
450  * Trigger an HPD interrupt event for the given port, emulating a short pulse
451  * generated by the sink, and schedule the dig port work to handle it.
452  */
453 void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
454 {
455 	struct intel_display *display = to_intel_display(dig_port);
456 	struct intel_hotplug *hotplug = &display->hotplug;
457 	struct intel_encoder *encoder = &dig_port->base;
458 
459 	spin_lock_irq(&display->irq.lock);
460 
461 	hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin);
462 	if (!hpd_pin_is_blocked(display, encoder->hpd_pin))
463 		queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
464 
465 	spin_unlock_irq(&display->irq.lock);
466 }
467 
468 /*
469  * Handle hotplug events outside the interrupt handler proper.
470  */
471 static void i915_hotplug_work_func(struct work_struct *work)
472 {
473 	struct intel_display *display =
474 		container_of(work, struct intel_display, hotplug.hotplug_work.work);
475 	struct intel_hotplug *hotplug = &display->hotplug;
476 	struct drm_connector_list_iter conn_iter;
477 	struct intel_connector *connector;
478 	u32 changed = 0, retry = 0;
479 	u32 hpd_event_bits;
480 	u32 hpd_retry_bits;
481 	struct drm_connector *first_changed_connector = NULL;
482 	int changed_connectors = 0;
483 	u32 blocked_hpd_pin_mask;
484 
485 	mutex_lock(&display->drm->mode_config.mutex);
486 	drm_dbg_kms(display->drm, "running encoder hotplug functions\n");
487 
488 	spin_lock_irq(&display->irq.lock);
489 
490 	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
491 	hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask;
492 	hotplug->event_bits &= ~hpd_event_bits;
493 	hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask;
494 	hotplug->retry_bits &= ~hpd_retry_bits;
495 
496 	/* Enable polling for connectors which had HPD IRQ storms */
497 	intel_hpd_irq_storm_switch_to_polling(display);
498 
499 	spin_unlock_irq(&display->irq.lock);
500 
501 	/* Skip calling encoder hotplug handlers if the ignore long HPD flag is set */
502 	if (display->hotplug.ignore_long_hpd) {
503 		drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
504 		mutex_unlock(&display->drm->mode_config.mutex);
505 		return;
506 	}
507 
508 	drm_connector_list_iter_begin(display->drm, &conn_iter);
509 	for_each_intel_connector_iter(connector, &conn_iter) {
510 		enum hpd_pin pin;
511 		u32 hpd_bit;
512 
513 		pin = intel_connector_hpd_pin(connector);
514 		if (pin == HPD_NONE)
515 			continue;
516 
517 		hpd_bit = BIT(pin);
518 		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
519 			struct intel_encoder *encoder =
520 				intel_attached_encoder(connector);
521 
522 			if (hpd_event_bits & hpd_bit)
523 				connector->hotplug_retries = 0;
524 			else
525 				connector->hotplug_retries++;
526 
527 			drm_dbg_kms(display->drm,
528 				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
529 				    connector->base.name, pin,
530 				    connector->hotplug_retries);
531 
532 			switch (encoder->hotplug(encoder, connector)) {
533 			case INTEL_HOTPLUG_UNCHANGED:
534 				break;
535 			case INTEL_HOTPLUG_CHANGED:
536 				changed |= hpd_bit;
537 				changed_connectors++;
538 				if (!first_changed_connector) {
539 					drm_connector_get(&connector->base);
540 					first_changed_connector = &connector->base;
541 				}
542 				break;
543 			case INTEL_HOTPLUG_RETRY:
544 				retry |= hpd_bit;
545 				break;
546 			}
547 		}
548 	}
549 	drm_connector_list_iter_end(&conn_iter);
550 	mutex_unlock(&display->drm->mode_config.mutex);
551 
552 	if (changed_connectors == 1)
553 		drm_kms_helper_connector_hotplug_event(first_changed_connector);
554 	else if (changed_connectors > 0)
555 		drm_kms_helper_hotplug_event(display->drm);
556 
557 	if (first_changed_connector)
558 		drm_connector_put(first_changed_connector);
559 
560 	/* Remove shared HPD pins that have changed */
561 	retry &= ~changed;
562 	if (retry) {
563 		spin_lock_irq(&display->irq.lock);
564 		display->hotplug.retry_bits |= retry;
565 
566 		mod_delayed_detection_work(display,
567 					   &display->hotplug.hotplug_work,
568 					   msecs_to_jiffies(HPD_RETRY_DELAY));
569 		spin_unlock_irq(&display->irq.lock);
570 	}
571 }
572 
573 
574 /**
575  * intel_hpd_irq_handler - main hotplug irq handler
576  * @display: display device
577  * @pin_mask: a mask of hpd pins that have triggered the irq
578  * @long_mask: a mask of hpd pins that may be long hpd pulses
579  *
580  * This is the main hotplug irq handler for all platforms. The platform specific
581  * irq handlers call the platform specific hotplug irq handlers, which read and
582  * decode the appropriate registers into bitmasks about hpd pins that have
583  * triggered (@pin_mask), and which of those pins may be long pulses
584  * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
585  * is not a digital port.
586  *
587  * Here, we do hotplug irq storm detection and mitigation, and pass further
588  * processing to appropriate bottom halves.
589  */
590 void intel_hpd_irq_handler(struct intel_display *display,
591 			   u32 pin_mask, u32 long_mask)
592 {
593 	struct intel_encoder *encoder;
594 	bool storm_detected = false;
595 	bool queue_dig = false, queue_hp = false;
596 	u32 long_hpd_pulse_mask = 0;
597 	u32 short_hpd_pulse_mask = 0;
598 	enum hpd_pin pin;
599 
600 	if (!pin_mask)
601 		return;
602 
603 	spin_lock(&display->irq.lock);
604 
605 	/*
606 	 * Determine whether ->hpd_pulse() exists for each pin, and
607 	 * whether we have a short or a long pulse. This is needed
608 	 * as each pin may have up to two encoders (HDMI and DP) and
609 	 * only one of them (DP) will have ->hpd_pulse().
610 	 */
611 	for_each_intel_encoder(display->drm, encoder) {
612 		bool long_hpd;
613 
614 		pin = encoder->hpd_pin;
615 		if (!(BIT(pin) & pin_mask))
616 			continue;
617 
618 		if (!intel_encoder_has_hpd_pulse(encoder))
619 			continue;
620 
621 		long_hpd = long_mask & BIT(pin);
622 
623 		drm_dbg(display->drm,
624 			"digital hpd on [ENCODER:%d:%s] - %s\n",
625 			encoder->base.base.id, encoder->base.name,
626 			long_hpd ? "long" : "short");
627 
628 		if (!hpd_pin_is_blocked(display, pin))
629 			queue_dig = true;
630 
631 		if (long_hpd) {
632 			long_hpd_pulse_mask |= BIT(pin);
633 			display->hotplug.long_hpd_pin_mask |= BIT(pin);
634 		} else {
635 			short_hpd_pulse_mask |= BIT(pin);
636 			display->hotplug.short_hpd_pin_mask |= BIT(pin);
637 		}
638 	}
639 
640 	/* Now process each pin just once */
641 	for_each_hpd_pin(pin) {
642 		bool long_hpd;
643 
644 		if (!(BIT(pin) & pin_mask))
645 			continue;
646 
647 		if (display->hotplug.stats[pin].state == HPD_DISABLED) {
648 			/*
649 			 * On GMCH platforms the interrupt mask bits only
650 			 * prevent irq generation, not the setting of the
651 			 * hotplug bits itself. So only WARN about unexpected
652 			 * interrupts on saner platforms.
653 			 */
654 			drm_WARN_ONCE(display->drm, !HAS_GMCH(display),
655 				      "Received HPD interrupt on pin %d although disabled\n",
656 				      pin);
657 			continue;
658 		}
659 
660 		if (display->hotplug.stats[pin].state != HPD_ENABLED)
661 			continue;
662 
663 		/*
664 		 * Delegate to ->hpd_pulse() if one of the encoders for this
665 		 * pin has it, otherwise let the hotplug_work deal with this
666 		 * pin directly.
667 		 */
668 		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
669 			long_hpd = long_hpd_pulse_mask & BIT(pin);
670 		} else {
671 			display->hotplug.event_bits |= BIT(pin);
672 			long_hpd = true;
673 
674 			if (!hpd_pin_is_blocked(display, pin))
675 				queue_hp = true;
676 		}
677 
678 		if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) {
679 			display->hotplug.event_bits &= ~BIT(pin);
680 			storm_detected = true;
681 			queue_hp = true;
682 		}
683 	}
684 
685 	/*
686 	 * Disable any IRQs that storms were detected on. Polling enablement
687 	 * happens later in our hotplug work.
688 	 */
689 	if (storm_detected)
690 		intel_hpd_irq_setup(display);
691 
692 	/*
693 	 * Our hotplug handler can grab modeset locks (by calling down into the
694 	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
695 	 * queue, as otherwise the flush_work in the pageflip code will
696 	 * deadlock.
697 	 */
698 	if (queue_dig)
699 		queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work);
700 	if (queue_hp)
701 		queue_delayed_detection_work(display,
702 					     &display->hotplug.hotplug_work, 0);
703 
704 	spin_unlock(&display->irq.lock);
705 }
706 
707 /**
708  * intel_hpd_init - initializes and enables hpd support
709  * @display: display device instance
710  *
711  * This function enables the hotplug support. It requires that interrupts have
712  * already been enabled with intel_irq_init_hw(). From this point on hotplug and
713  * poll requests can run concurrently with other code, so locking rules must be
714  * obeyed.
715  *
716  * This is a separate step from interrupt enabling to simplify the locking rules
717  * in the driver load and resume code.
718  *
719  * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
720  */
721 void intel_hpd_init(struct intel_display *display)
722 {
723 	int i;
724 
725 	if (!HAS_DISPLAY(display))
726 		return;
727 
728 	for_each_hpd_pin(i) {
729 		display->hotplug.stats[i].count = 0;
730 		display->hotplug.stats[i].state = HPD_ENABLED;
731 	}
732 
733 	/*
734 	 * Interrupt setup is already guaranteed to be single-threaded; this is
735 	 * just to make the assert_spin_locked checks happy.
736 	 */
737 	spin_lock_irq(&display->irq.lock);
738 	intel_hpd_irq_setup(display);
739 	spin_unlock_irq(&display->irq.lock);
740 }
741 
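/*
 * Detect all HPD-capable connectors and send hotplug event(s) for those that
 * changed; used to pick up hotplugs that may have been missed while polling
 * was being disabled.
 */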
742 static void i915_hpd_poll_detect_connectors(struct intel_display *display)
743 {
744 	struct drm_connector_list_iter conn_iter;
745 	struct intel_connector *connector;
746 	struct intel_connector *first_changed_connector = NULL;
747 	int changed = 0;
748 
749 	mutex_lock(&display->drm->mode_config.mutex);
750 
751 	if (!display->drm->mode_config.poll_enabled)
752 		goto out;
753 
754 	drm_connector_list_iter_begin(display->drm, &conn_iter);
755 	for_each_intel_connector_iter(connector, &conn_iter) {
756 		if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
757 			continue;
758 
759 		if (intel_hotplug_detect_connector(connector) != INTEL_HOTPLUG_CHANGED)
760 			continue;
761 
762 		changed++;
763 
764 		if (changed == 1) {
765 			drm_connector_get(&connector->base);
766 			first_changed_connector = connector;
767 		}
768 	}
769 	drm_connector_list_iter_end(&conn_iter);
770 
771 out:
772 	mutex_unlock(&display->drm->mode_config.mutex);
773 
774 	if (!changed)
775 		return;
776 
777 	if (changed == 1)
778 		drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
779 	else
780 		drm_kms_helper_hotplug_event(display->drm);
781 
782 	drm_connector_put(&first_changed_connector->base);
783 }
784 
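/*
 * Worker that applies the current hotplug.poll_enabled state: when enabling
 * polling, switch HPD-only connectors to connect/disconnect polling and
 * reschedule the poll work; when disabling, restore each connector's default
 * ->polled mode and pick up any hotplugs missed in the meantime.
 */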
785 static void i915_hpd_poll_init_work(struct work_struct *work)
786 {
787 	struct intel_display *display =
788 		container_of(work, typeof(*display), hotplug.poll_init_work);
789 	struct drm_connector_list_iter conn_iter;
790 	struct intel_connector *connector;
791 	intel_wakeref_t wakeref;
792 	bool enabled;
793 
794 	mutex_lock(&display->drm->mode_config.mutex);
795 
796 	enabled = READ_ONCE(display->hotplug.poll_enabled);
797 	/*
798 	 * Prevent taking a power reference from this sequence of
799 	 * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
800 	 * connector detect which would requeue i915_hpd_poll_init_work()
801 	 * and so risk an endless loop of this same sequence.
802 	 */
803 	if (!enabled) {
804 		wakeref = intel_display_power_get(display,
805 						  POWER_DOMAIN_DISPLAY_CORE);
806 		drm_WARN_ON(display->drm,
807 			    READ_ONCE(display->hotplug.poll_enabled));
808 		cancel_work(&display->hotplug.poll_init_work);
809 	}
810 
811 	spin_lock_irq(&display->irq.lock);
812 
813 	drm_connector_list_iter_begin(display->drm, &conn_iter);
814 	for_each_intel_connector_iter(connector, &conn_iter) {
815 		enum hpd_pin pin;
816 
817 		pin = intel_connector_hpd_pin(connector);
818 		if (pin == HPD_NONE)
819 			continue;
820 
821 		if (display->hotplug.stats[pin].state == HPD_DISABLED)
822 			continue;
823 
824 		connector->base.polled = connector->polled;
825 
826 		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
827 			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
828 				DRM_CONNECTOR_POLL_DISCONNECT;
829 	}
830 	drm_connector_list_iter_end(&conn_iter);
831 
832 	spin_unlock_irq(&display->irq.lock);
833 
834 	if (enabled)
835 		drm_kms_helper_poll_reschedule(display->drm);
836 
837 	mutex_unlock(&display->drm->mode_config.mutex);
838 
839 	/*
840 	 * We might have missed hotplugs that happened while we were
841 	 * in the middle of disabling polling
842 	 */
843 	if (!enabled) {
844 		i915_hpd_poll_detect_connectors(display);
845 
846 		intel_display_power_put(display,
847 					POWER_DOMAIN_DISPLAY_CORE,
848 					wakeref);
849 	}
850 }
851 
852 /**
853  * intel_hpd_poll_enable - enable polling for connectors with hpd
854  * @display: display device instance
855  *
856  * This function enables polling for all connectors which support HPD.
857  * Under certain conditions HPD may not be functional. On most Intel GPUs,
858  * this happens when we enter runtime suspend.
859  * On Valleyview and Cherryview systems, this also happens when we shut off all
860  * of the powerwells.
861  *
862  * Since this function can get called in contexts where we're already holding
863  * dev->mode_config.mutex, we do the actual polling enabling in a separate
864  * worker.
865  *
866  * Also see: intel_hpd_init() and intel_hpd_poll_disable().
867  */
868 void intel_hpd_poll_enable(struct intel_display *display)
869 {
870 	if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display))
871 		return;
872 
873 	WRITE_ONCE(display->hotplug.poll_enabled, true);
874 
875 	/*
876 	 * We might already be holding dev->mode_config.mutex, so do this in a
877 	 * separate worker
878 	 * As well, there's no issue if we race here since we always reschedule
879 	 * this worker anyway
880 	 */
881 	spin_lock_irq(&display->irq.lock);
882 	queue_detection_work(display,
883 			     &display->hotplug.poll_init_work);
884 	spin_unlock_irq(&display->irq.lock);
885 }
886 
887 /**
888  * intel_hpd_poll_disable - disable polling for connectors with hpd
889  * @display: display device instance
890  *
891  * This function disables polling for all connectors which support HPD.
892  * Under certain conditions HPD may not be functional. On most Intel GPUs,
893  * this happens when we enter runtime suspend.
894  * On Valleyview and Cherryview systems, this also happens when we shut off all
895  * of the powerwells.
896  *
897  * Since this function can get called in contexts where we're already holding
898  * dev->mode_config.mutex, we do the actual polling disabling in a separate
899  * worker.
900  *
901  * Also used during driver init to initialize connector->polled
902  * appropriately for all connectors.
903  *
904  * Also see: intel_hpd_init() and intel_hpd_poll_enable().
905  */
906 void intel_hpd_poll_disable(struct intel_display *display)
907 {
908 	if (!HAS_DISPLAY(display))
909 		return;
910 
911 	WRITE_ONCE(display->hotplug.poll_enabled, false);
912 
913 	spin_lock_irq(&display->irq.lock);
914 	queue_detection_work(display,
915 			     &display->hotplug.poll_init_work);
916 	spin_unlock_irq(&display->irq.lock);
917 }
918 
919 void intel_hpd_poll_fini(struct intel_display *display)
920 {
921 	struct intel_connector *connector;
922 	struct drm_connector_list_iter conn_iter;
923 
924 	/* Kill all the work that may have been queued by hpd. */
925 	drm_connector_list_iter_begin(display->drm, &conn_iter);
926 	for_each_intel_connector_iter(connector, &conn_iter) {
927 		intel_connector_cancel_modeset_retry_work(connector);
928 		intel_hdcp_cancel_works(connector);
929 	}
930 	drm_connector_list_iter_end(&conn_iter);
931 }
932 
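/*
 * Early, pre-IRQ-install hotplug initialization: set up the hotplug work items
 * and the default HPD storm thresholds.
 */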
933 void intel_hpd_init_early(struct intel_display *display)
934 {
935 	INIT_DELAYED_WORK(&display->hotplug.hotplug_work,
936 			  i915_hotplug_work_func);
937 	INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func);
938 	INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work);
939 	INIT_DELAYED_WORK(&display->hotplug.reenable_work,
940 			  intel_hpd_irq_storm_reenable_work);
941 
942 	display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
943 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
944 	 * detection, as short HPD storms will occur as a natural part of
945 	 * sideband messaging with MST.
946 	 * On older platforms however, IRQ storms can occur with both long and
947 	 * short pulses, as seen on some G4x systems.
948 	 */
949 	display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display);
950 }
951 
952 static bool cancel_all_detection_work(struct intel_display *display)
953 {
954 	bool was_pending = false;
955 
956 	if (cancel_delayed_work_sync(&display->hotplug.hotplug_work))
957 		was_pending = true;
958 	if (cancel_work_sync(&display->hotplug.poll_init_work))
959 		was_pending = true;
960 	if (cancel_delayed_work_sync(&display->hotplug.reenable_work))
961 		was_pending = true;
962 
963 	return was_pending;
964 }
965 
966 void intel_hpd_cancel_work(struct intel_display *display)
967 {
968 	if (!HAS_DISPLAY(display))
969 		return;
970 
971 	spin_lock_irq(&display->irq.lock);
972 
973 	drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));
974 
975 	display->hotplug.long_hpd_pin_mask = 0;
976 	display->hotplug.short_hpd_pin_mask = 0;
977 	display->hotplug.event_bits = 0;
978 	display->hotplug.retry_bits = 0;
979 
980 	spin_unlock_irq(&display->irq.lock);
981 
982 	cancel_work_sync(&display->hotplug.dig_port_work);
983 
984 	/*
985 	 * All other work triggered by hotplug events should be canceled by
986 	 * now.
987 	 */
988 	if (cancel_all_detection_work(display))
989 		drm_dbg_kms(display->drm, "Hotplug detection work still active\n");
990 }
991 
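/*
 * Kick the dig port and/or hotplug work for any HPD events that are still
 * pending on unblocked pins, e.g. events that arrived while the pin was
 * blocked or while detection work was disabled.
 */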
992 static void queue_work_for_missed_irqs(struct intel_display *display)
993 {
994 	struct intel_hotplug *hotplug = &display->hotplug;
995 	bool queue_hp_work = false;
996 	u32 blocked_hpd_pin_mask;
997 	enum hpd_pin pin;
998 
999 	lockdep_assert_held(&display->irq.lock);
1000 
1001 	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
1002 	if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask)
1003 		queue_hp_work = true;
1004 
1005 	for_each_hpd_pin(pin) {
1006 		switch (display->hotplug.stats[pin].state) {
1007 		case HPD_MARK_DISABLED:
1008 			queue_hp_work = true;
1009 			break;
1010 		case HPD_DISABLED:
1011 		case HPD_ENABLED:
1012 			break;
1013 		default:
1014 			MISSING_CASE(display->hotplug.stats[pin].state);
1015 		}
1016 	}
1017 
1018 	if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask)
1019 		queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
1020 
1021 	if (queue_hp_work)
1022 		queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
1023 }
1024 
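/*
 * Blocking is refcounted per HPD pin: block_hpd_pin() returns true on the
 * first block of the pin, unblock_hpd_pin() returns true on the last unblock.
 */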
1025 static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin)
1026 {
1027 	struct intel_hotplug *hotplug = &display->hotplug;
1028 
1029 	lockdep_assert_held(&display->irq.lock);
1030 
1031 	hotplug->stats[pin].blocked_count++;
1032 
1033 	return hotplug->stats[pin].blocked_count == 1;
1034 }
1035 
1036 static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin)
1037 {
1038 	struct intel_hotplug *hotplug = &display->hotplug;
1039 
1040 	lockdep_assert_held(&display->irq.lock);
1041 
1042 	if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0))
1043 		return true;
1044 
1045 	hotplug->stats[pin].blocked_count--;
1046 
1047 	return hotplug->stats[pin].blocked_count == 0;
1048 }
1049 
1050 /**
1051  * intel_hpd_block - Block handling of HPD IRQs on an HPD pin
1052  * @encoder: Encoder to block the HPD handling for
1053  *
1054  * Blocks the handling of HPD IRQs on the HPD pin of @encoder.
1055  *
1056  * On return:
1057  *
1058  * - It's guaranteed that the blocked encoders' HPD pulse handler
1059  *   (via intel_digital_port::hpd_pulse()) is not running.
1060  * - The hotplug event handling (via intel_encoder::hotplug()) of an
1061  *   HPD IRQ pending at the time this function is called may be still
1062  *   running.
1063  * - Detection on the encoder's connector (via
1064  *   drm_connector_helper_funcs::detect_ctx(),
1065  *   drm_connector_funcs::detect()) remains allowed, for instance as part of
1066  *   userspace connector probing, or DRM core's connector polling.
1067  *
1068  * The call must be followed by calling intel_hpd_unblock(), or
1069  * intel_hpd_clear_and_unblock().
1070  *
1071  * Note that the handling of HPD IRQs for another encoder using the same HPD
1072  * pin as that of @encoder will also be blocked.
1073  */
1074 void intel_hpd_block(struct intel_encoder *encoder)
1075 {
1076 	struct intel_display *display = to_intel_display(encoder);
1077 	struct intel_hotplug *hotplug = &display->hotplug;
1078 	bool do_flush = false;
1079 
1080 	if (encoder->hpd_pin == HPD_NONE)
1081 		return;
1082 
1083 	spin_lock_irq(&display->irq.lock);
1084 
1085 	if (block_hpd_pin(display, encoder->hpd_pin))
1086 		do_flush = true;
1087 
1088 	spin_unlock_irq(&display->irq.lock);
1089 
1090 	if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin))
1091 		flush_work(&hotplug->dig_port_work);
1092 }
1093 
1094 /**
1095  * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin
1096  * @encoder: Encoder to unblock the HPD handling for
1097  *
1098  * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
1099  * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
1100  * HPD pin while it was blocked will be handled for @encoder and for any
1101  * other encoder sharing the same HPD pin.
1102  */
1103 void intel_hpd_unblock(struct intel_encoder *encoder)
1104 {
1105 	struct intel_display *display = to_intel_display(encoder);
1106 
1107 	if (encoder->hpd_pin == HPD_NONE)
1108 		return;
1109 
1110 	spin_lock_irq(&display->irq.lock);
1111 
1112 	if (unblock_hpd_pin(display, encoder->hpd_pin))
1113 		queue_work_for_missed_irqs(display);
1114 
1115 	spin_unlock_irq(&display->irq.lock);
1116 }
1117 
1118 /**
1119  * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin
1120  * @encoder: Encoder to unblock the HPD handling for
1121  *
1122  * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
1123  * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
1124  * HPD pin while it was blocked will be cleared, handling only new IRQs.
1125  */
1126 void intel_hpd_clear_and_unblock(struct intel_encoder *encoder)
1127 {
1128 	struct intel_display *display = to_intel_display(encoder);
1129 	struct intel_hotplug *hotplug = &display->hotplug;
1130 	enum hpd_pin pin = encoder->hpd_pin;
1131 
1132 	if (pin == HPD_NONE)
1133 		return;
1134 
1135 	spin_lock_irq(&display->irq.lock);
1136 
1137 	if (unblock_hpd_pin(display, pin)) {
1138 		hotplug->event_bits &= ~BIT(pin);
1139 		hotplug->retry_bits &= ~BIT(pin);
1140 		hotplug->short_hpd_pin_mask &= ~BIT(pin);
1141 		hotplug->long_hpd_pin_mask &= ~BIT(pin);
1142 	}
1143 
1144 	spin_unlock_irq(&display->irq.lock);
1145 }
1146 
1147 void intel_hpd_enable_detection_work(struct intel_display *display)
1148 {
1149 	spin_lock_irq(&display->irq.lock);
1150 	display->hotplug.detection_work_enabled = true;
1151 	queue_work_for_missed_irqs(display);
1152 	spin_unlock_irq(&display->irq.lock);
1153 }
1154 
1155 void intel_hpd_disable_detection_work(struct intel_display *display)
1156 {
1157 	spin_lock_irq(&display->irq.lock);
1158 	display->hotplug.detection_work_enabled = false;
1159 	spin_unlock_irq(&display->irq.lock);
1160 
1161 	cancel_all_detection_work(display);
1162 }
1163 
1164 bool intel_hpd_schedule_detection(struct intel_display *display)
1165 {
1166 	unsigned long flags;
1167 	bool ret;
1168 
1169 	spin_lock_irqsave(&display->irq.lock, flags);
1170 	ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
1171 	spin_unlock_irqrestore(&display->irq.lock, flags);
1172 
1173 	return ret;
1174 }
1175 
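/*
 * i915_hpd_storm_ctl debugfs interface: reading reports the current storm
 * threshold and whether storm mitigation is pending; writing an unsigned
 * threshold (0 disables detection) or "reset" updates it. A typical use,
 * assuming the DRM debugfs directory is /sys/kernel/debug/dri/0:
 *
 *   echo 2 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   cat /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */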
1176 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1177 {
1178 	struct intel_display *display = m->private;
1179 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1180 	struct intel_hotplug *hotplug = &display->hotplug;
1181 
1182 	/* Synchronize with everything first in case there's been an HPD
1183 	 * storm, but we haven't finished handling it in the kernel yet
1184 	 */
1185 	intel_synchronize_irq(dev_priv);
1186 	flush_work(&display->hotplug.dig_port_work);
1187 	flush_delayed_work(&display->hotplug.hotplug_work);
1188 
1189 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1190 	seq_printf(m, "Detected: %s\n",
1191 		   str_yes_no(delayed_work_pending(&hotplug->reenable_work)));
1192 
1193 	return 0;
1194 }
1195 
1196 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1197 					const char __user *ubuf, size_t len,
1198 					loff_t *offp)
1199 {
1200 	struct seq_file *m = file->private_data;
1201 	struct intel_display *display = m->private;
1202 	struct intel_hotplug *hotplug = &display->hotplug;
1203 	unsigned int new_threshold;
1204 	int i;
1205 	char *newline;
1206 	char tmp[16];
1207 
1208 	if (len >= sizeof(tmp))
1209 		return -EINVAL;
1210 
1211 	if (copy_from_user(tmp, ubuf, len))
1212 		return -EFAULT;
1213 
1214 	tmp[len] = '\0';
1215 
1216 	/* Strip newline, if any */
1217 	newline = strchr(tmp, '\n');
1218 	if (newline)
1219 		*newline = '\0';
1220 
1221 	if (strcmp(tmp, "reset") == 0)
1222 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1223 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1224 		return -EINVAL;
1225 
1226 	if (new_threshold > 0)
1227 		drm_dbg_kms(display->drm,
1228 			    "Setting HPD storm detection threshold to %d\n",
1229 			    new_threshold);
1230 	else
1231 		drm_dbg_kms(display->drm, "Disabling HPD storm detection\n");
1232 
1233 	spin_lock_irq(&display->irq.lock);
1234 	hotplug->hpd_storm_threshold = new_threshold;
1235 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1236 	for_each_hpd_pin(i)
1237 		hotplug->stats[i].count = 0;
1238 	spin_unlock_irq(&display->irq.lock);
1239 
1240 	/* Re-enable hpd immediately if we were in an irq storm */
1241 	flush_delayed_work(&display->hotplug.reenable_work);
1242 
1243 	return len;
1244 }
1245 
1246 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1247 {
1248 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1249 }
1250 
1251 static const struct file_operations i915_hpd_storm_ctl_fops = {
1252 	.owner = THIS_MODULE,
1253 	.open = i915_hpd_storm_ctl_open,
1254 	.read = seq_read,
1255 	.llseek = seq_lseek,
1256 	.release = single_release,
1257 	.write = i915_hpd_storm_ctl_write
1258 };
1259 
1260 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1261 {
1262 	struct intel_display *display = m->private;
1263 
1264 	seq_printf(m, "Enabled: %s\n",
1265 		   str_yes_no(display->hotplug.hpd_short_storm_enabled));
1266 
1267 	return 0;
1268 }
1269 
1270 static int
1271 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1272 {
1273 	return single_open(file, i915_hpd_short_storm_ctl_show,
1274 			   inode->i_private);
1275 }
1276 
1277 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1278 					      const char __user *ubuf,
1279 					      size_t len, loff_t *offp)
1280 {
1281 	struct seq_file *m = file->private_data;
1282 	struct intel_display *display = m->private;
1283 	struct intel_hotplug *hotplug = &display->hotplug;
1284 	char *newline;
1285 	char tmp[16];
1286 	int i;
1287 	bool new_state;
1288 
1289 	if (len >= sizeof(tmp))
1290 		return -EINVAL;
1291 
1292 	if (copy_from_user(tmp, ubuf, len))
1293 		return -EFAULT;
1294 
1295 	tmp[len] = '\0';
1296 
1297 	/* Strip newline, if any */
1298 	newline = strchr(tmp, '\n');
1299 	if (newline)
1300 		*newline = '\0';
1301 
1302 	/* Reset to the "default" state for this system */
1303 	if (strcmp(tmp, "reset") == 0)
1304 		new_state = !HAS_DP_MST(display);
1305 	else if (kstrtobool(tmp, &new_state) != 0)
1306 		return -EINVAL;
1307 
1308 	drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n",
1309 		    new_state ? "En" : "Dis");
1310 
1311 	spin_lock_irq(&display->irq.lock);
1312 	hotplug->hpd_short_storm_enabled = new_state;
1313 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1314 	for_each_hpd_pin(i)
1315 		hotplug->stats[i].count = 0;
1316 	spin_unlock_irq(&display->irq.lock);
1317 
1318 	/* Re-enable hpd immediately if we were in an irq storm */
1319 	flush_delayed_work(&display->hotplug.reenable_work);
1320 
1321 	return len;
1322 }
1323 
1324 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1325 	.owner = THIS_MODULE,
1326 	.open = i915_hpd_short_storm_ctl_open,
1327 	.read = seq_read,
1328 	.llseek = seq_lseek,
1329 	.release = single_release,
1330 	.write = i915_hpd_short_storm_ctl_write,
1331 };
1332 
1333 void intel_hpd_debugfs_register(struct intel_display *display)
1334 {
1335 	struct drm_minor *minor = display->drm->primary;
1336 
1337 	debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
1338 			    display, &i915_hpd_storm_ctl_fops);
1339 	debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
1340 			    display, &i915_hpd_short_storm_ctl_fops);
1341 	debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
1342 			    &display->hotplug.ignore_long_hpd);
1343 }
1344