/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/kernel.h>

#include <drm/drm_probe_helper.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_connector.h"
#include "intel_display_power.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled at many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, the userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive amounts of interrupts and grinding
 * the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a Display Port sink is connected; hence, on platforms whose DP
 * callback is handled by i915_digport_work_func(), re-enabling of HPD is not
 * performed (it was never expected to be disabled in the first place). This is
 * specific to DP sinks handled by that routine; any other display, such as
 * HDMI or DVI enabled on the same port, will have proper logic, since it will
 * use i915_hotplug_work_func(), where this logic is handled.
 */
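
/*
 * As a rough, non-authoritative sketch of the above (the prose in the DOC
 * comment is the reference), an HPD event flows through these stages:
 *
 *   HPD interrupt fires
 *     -> platform specific irq handler (i915_irq.c and friends)
 *       -> intel_hpd_irq_handler() (storm detection, demultiplexing)
 *         -> i915_digport_work_func() (DP short pulses, MST long pulses)
 *         -> i915_hotplug_work_func() (connector detect, change tracking)
 *           -> drm_kms_helper_hotplug_event() (uevent to userspace)
 *             -> userspace modeset
 */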

/**
 * intel_hpd_pin_default - return default pin associated with certain port.
 * @port: the hpd port to get associated pin
 *
 * This is only valid for and used by digital port encoders.
 *
 * Return: the hpd pin associated with @port. For example, PORT_A maps to
 * HPD_PORT_A, PORT_B to HPD_PORT_B, and so on.
 */
enum hpd_pin intel_hpd_pin_default(enum port port)
{
	return HPD_PORT_A + port - PORT_A;
}

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD	50

#define HPD_STORM_DETECT_PERIOD		1000
#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
#define HPD_RETRY_DELAY			1000

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/*
	 * MST connectors get their encoder attached dynamically
	 * so we need to make sure we have an encoder here. But since
	 * MST encoders have their hpd_pin set to HPD_NONE we don't
	 * have to special case them beyond that.
	 */
	return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @display: display device
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @display->hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &display->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must track these as well. Because short
 * IRQs occur naturally as part of sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return: true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct intel_display *display,
				       enum hpd_pin pin, bool long_hpd)
{
	struct intel_hotplug *hpd = &display->hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !display->hotplug.hpd_short_storm_enabled))
		return false;

	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(display->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(display->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}
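
/*
 * Worked example for the logic above, using the defaults: with a
 * threshold of 50, each long IRQ adds 10 to the per-pin count, so the
 * count first exceeds the threshold on the 6th long IRQ within a single
 * 1000 ms window, i.e. 5 long IRQs are tolerated (hence "Threshold == 5
 * for long IRQs" above). Short IRQs add 1 each, so 50 are tolerated, and
 * they are only counted at all when hpd_short_storm_enabled is set.
 */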

static bool detection_work_enabled(struct intel_display *display)
{
	lockdep_assert_held(&display->irq.lock);

	return display->hotplug.detection_work_enabled;
}

static bool
mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return mod_delayed_work(i915->unordered_wq, work, delay);
}

static bool
queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return queue_delayed_work(i915->unordered_wq, work, delay);
}

static bool
queue_detection_work(struct intel_display *display, struct work_struct *work)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return queue_work(i915->unordered_wq, work);
}

static void
intel_hpd_irq_storm_switch_to_polling(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    display->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(display->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		display->hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_reschedule(display->drm);
		mod_delayed_detection_work(display,
					   &display->hotplug.reenable_work,
					   msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), hotplug.reenable_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	struct ref_tracker *wakeref;
	enum hpd_pin pin;

	wakeref = intel_display_rpm_get(display);

	spin_lock_irq(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    display->hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(display->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (display->hotplug.stats[pin].state == HPD_DISABLED)
			display->hotplug.stats[pin].state = HPD_ENABLED;
	}

	intel_hpd_irq_setup(display);

	spin_unlock_irq(&display->irq.lock);

	intel_display_rpm_put(display, wakeref);
}

static enum intel_hotplug_state
intel_hotplug_detect_connector(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;
	u64 old_epoch_counter;
	int status;
	bool ret = false;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;
	old_epoch_counter = connector->base.epoch_counter;

	status = drm_helper_probe_detect(&connector->base, NULL, false);
	if (!connector->base.force)
		connector->base.status = status;

	if (old_epoch_counter != connector->base.epoch_counter)
		ret = true;

	if (ret) {
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.base.id,
			    connector->base.name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->base.status),
			    old_epoch_counter,
			    connector->base.epoch_counter);
		return INTEL_HOTPLUG_CHANGED;
	}
	return INTEL_HOTPLUG_UNCHANGED;
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
		      struct intel_connector *connector)
{
	return intel_hotplug_detect_connector(connector);
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(display->drm, encoder) {
		if (encoder->hpd_pin != pin)
			continue;

		if (intel_encoder_has_hpd_pulse(encoder))
			return true;
	}

	return false;
}

static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin)
{
	lockdep_assert_held(&display->irq.lock);

	return display->hotplug.stats[pin].blocked_count;
}

static u32 get_blocked_hpd_pin_mask(struct intel_display *display)
{
	enum hpd_pin pin;
	u32 hpd_pin_mask = 0;

	for_each_hpd_pin(pin) {
		if (hpd_pin_is_blocked(display, pin))
			hpd_pin_mask |= BIT(pin);
	}

	return hpd_pin_mask;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, struct intel_display, hotplug.dig_port_work);
	struct intel_hotplug *hotplug = &display->hotplug;
	u32 long_hpd_pin_mask, short_hpd_pin_mask;
	struct intel_encoder *encoder;
	u32 blocked_hpd_pin_mask;
	u32 old_bits = 0;

	spin_lock_irq(&display->irq.lock);

	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask;
	hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask;
	short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask;
	hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask;

	spin_unlock_irq(&display->irq.lock);

	for_each_intel_encoder(display->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum hpd_pin pin = encoder->hpd_pin;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_hpd_pin_mask & BIT(pin);
		short_hpd = short_hpd_pin_mask & BIT(pin);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&display->irq.lock);
		display->hotplug.event_bits |= old_bits;
		queue_delayed_detection_work(display,
					     &display->hotplug.hotplug_work, 0);
		spin_unlock_irq(&display->irq.lock);
	}
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_hotplug *hotplug = &display->hotplug;
	struct intel_encoder *encoder = &dig_port->base;

	spin_lock_irq(&display->irq.lock);

	hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin);
	if (!hpd_pin_is_blocked(display, encoder->hpd_pin))
		queue_work(hotplug->dp_wq, &hotplug->dig_port_work);

	spin_unlock_irq(&display->irq.lock);
}
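
/*
 * Illustrative (hypothetical) caller of intel_hpd_trigger_irq(): code
 * that learns out of band that the sink wants attention, e.g. via a
 * sideband message, can emulate the short pulse instead of duplicating
 * the processing:
 *
 *	if (sink_wants_attention)
 *		intel_hpd_trigger_irq(dig_port);
 *
 * The condition "sink_wants_attention" is made up for this sketch; the
 * effect is exactly as if the sink had raised a short HPD pulse.
 */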

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, struct intel_display, hotplug.hotplug_work.work);
	struct intel_hotplug *hotplug = &display->hotplug;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;
	struct drm_connector *first_changed_connector = NULL;
	int changed_connectors = 0;
	u32 blocked_hpd_pin_mask;

	mutex_lock(&display->drm->mode_config.mutex);
	drm_dbg_kms(display->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&display->irq.lock);

	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask;
	hotplug->event_bits &= ~hpd_event_bits;
	hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask;
	hotplug->retry_bits &= ~hpd_retry_bits;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(display);

	spin_unlock_irq(&display->irq.lock);

	/* Skip calling encoder hotplug handlers if the ignore long HPD flag is set */
	if (display->hotplug.ignore_long_hpd) {
		drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
		mutex_unlock(&display->drm->mode_config.mutex);
		return;
	}

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(display->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				changed_connectors++;
				if (!first_changed_connector) {
					drm_connector_get(&connector->base);
					first_changed_connector = &connector->base;
				}
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&display->drm->mode_config.mutex);

	if (changed_connectors == 1)
		drm_kms_helper_connector_hotplug_event(first_changed_connector);
	else if (changed_connectors > 0)
		drm_kms_helper_hotplug_event(display->drm);

	if (first_changed_connector)
		drm_connector_put(first_changed_connector);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&display->irq.lock);
		display->hotplug.retry_bits |= retry;

		mod_delayed_detection_work(display,
					   &display->hotplug.hotplug_work,
					   msecs_to_jiffies(HPD_RETRY_DELAY));
		spin_unlock_irq(&display->irq.lock);
	}
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @display: display device
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct intel_display *display,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&display->irq.lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(display->drm, encoder) {
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(display->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");

		if (!hpd_pin_is_blocked(display, pin))
			queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			display->hotplug.long_hpd_pin_mask |= BIT(pin);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			display->hotplug.short_hpd_pin_mask |= BIT(pin);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (display->hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(display->drm, !HAS_GMCH(display),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (display->hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			display->hotplug.event_bits |= BIT(pin);
			long_hpd = true;

			if (!hpd_pin_is_blocked(display, pin))
				queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) {
			display->hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in our hotplug work.
	 */
	if (storm_detected)
		intel_hpd_irq_setup(display);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_detection_work(display,
					     &display->hotplug.hotplug_work, 0);

	spin_unlock(&display->irq.lock);
}
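
/*
 * Minimal sketch of how a platform irq handler is expected to feed the
 * function above. The register decode below is hypothetical (the real
 * decoding lives in the platform specific hotplug irq code); only the
 * shape of the call is the point:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	if (port_b_hotplug_fired(hotplug_status)) {
 *		pin_mask |= BIT(HPD_PORT_B);
 *		if (port_b_long_pulse(hotplug_status))
 *			long_mask |= BIT(HPD_PORT_B);
 *	}
 *
 *	intel_hpd_irq_handler(display, pin_mask, long_mask);
 */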

/**
 * intel_hpd_init - initializes and enables hpd support
 * @display: display device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug
 * and poll requests can run concurrently to other code, so locking rules must
 * be obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
 */
void intel_hpd_init(struct intel_display *display)
{
	int i;

	if (!HAS_DISPLAY(display))
		return;

	for_each_hpd_pin(i) {
		display->hotplug.stats[i].count = 0;
		display->hotplug.stats[i].state = HPD_ENABLED;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irq(&display->irq.lock);
	intel_hpd_irq_setup(display);
	spin_unlock_irq(&display->irq.lock);
}
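
/*
 * Illustrative ordering on the load/resume path (an assumption for
 * orientation, not a verbatim copy of the driver's code): interrupts are
 * installed first, then HPD support is enabled on top of them:
 *
 *	intel_irq_init_hw(i915);
 *	intel_hpd_init(display);
 *	intel_hpd_poll_disable(display);
 */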

static void i915_hpd_poll_detect_connectors(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	struct intel_connector *first_changed_connector = NULL;
	int changed = 0;

	mutex_lock(&display->drm->mode_config.mutex);

	if (!display->drm->mode_config.poll_enabled)
		goto out;

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
			continue;

		if (intel_hotplug_detect_connector(connector) != INTEL_HOTPLUG_CHANGED)
			continue;

		changed++;

		if (changed == 1) {
			drm_connector_get(&connector->base);
			first_changed_connector = connector;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

out:
	mutex_unlock(&display->drm->mode_config.mutex);

	if (!changed)
		return;

	if (changed == 1)
		drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
	else
		drm_kms_helper_hotplug_event(display->drm);

	drm_connector_put(&first_changed_connector->base);
}

static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), hotplug.poll_init_work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	bool enabled;

	mutex_lock(&display->drm->mode_config.mutex);

	enabled = READ_ONCE(display->hotplug.poll_enabled);
	/*
	 * Prevent taking a power reference from this sequence of
	 * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
	 * connector detect which would requeue i915_hpd_poll_init_work()
	 * and so risk an endless loop of this same sequence.
	 */
	if (!enabled) {
		wakeref = intel_display_power_get(display,
						  POWER_DOMAIN_DISPLAY_CORE);
		drm_WARN_ON(display->drm,
			    READ_ONCE(display->hotplug.poll_enabled));
		cancel_work(&display->hotplug.poll_init_work);
	}

	spin_lock_irq(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		if (display->hotplug.stats[pin].state == HPD_DISABLED)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	spin_unlock_irq(&display->irq.lock);

	if (enabled)
		drm_kms_helper_poll_reschedule(display->drm);

	mutex_unlock(&display->drm->mode_config.mutex);

	/*
	 * We might have missed any hotplug events that happened while we were
	 * in the middle of disabling polling.
	 */
	if (!enabled) {
		i915_hpd_poll_detect_connectors(display);

		intel_display_power_put(display,
					POWER_DOMAIN_DISPLAY_CORE,
					wakeref);
	}
}

/**
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @display: display device instance
 *
 * This function enables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct intel_display *display)
{
	if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display))
		return;

	WRITE_ONCE(display->hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker. There's also no issue if we race here, since we
	 * always reschedule this worker anyway.
	 */
	spin_lock_irq(&display->irq.lock);
	queue_detection_work(display,
			     &display->hotplug.poll_init_work);
	spin_unlock_irq(&display->irq.lock);
}

/**
 * intel_hpd_poll_disable - disable polling for connectors with hpd
 * @display: display device instance
 *
 * This function disables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug disabling in a separate
 * worker.
 *
 * Also used during driver init to initialize connector->polled
 * appropriately for all connectors.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_enable().
 */
void intel_hpd_poll_disable(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	for_each_intel_dp(display->drm, encoder)
		intel_dp_dpcd_set_probe(enc_to_intel_dp(encoder), true);

	WRITE_ONCE(display->hotplug.poll_enabled, false);

	spin_lock_irq(&display->irq.lock);
	queue_detection_work(display,
			     &display->hotplug.poll_init_work);
	spin_unlock_irq(&display->irq.lock);
}
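
/*
 * Illustrative runtime PM pairing (assumed call sites, for orientation
 * only): HPD typically stops working across runtime suspend, so polling
 * takes over while suspended:
 *
 *	runtime suspend:  intel_hpd_poll_enable(display);
 *	runtime resume:   intel_hpd_init(display);
 *	                  intel_hpd_poll_disable(display);
 */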

void intel_hpd_poll_fini(struct intel_display *display)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		intel_connector_cancel_modeset_retry_work(connector);
		intel_hdcp_cancel_works(connector);
	}
	drm_connector_list_iter_end(&conn_iter);
}

void intel_hpd_init_early(struct intel_display *display)
{
	INIT_DELAYED_WORK(&display->hotplug.hotplug_work,
			  i915_hotplug_work_func);
	INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&display->hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);

	display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/*
	 * If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display);
}

static bool cancel_all_detection_work(struct intel_display *display)
{
	bool was_pending = false;

	if (cancel_delayed_work_sync(&display->hotplug.hotplug_work))
		was_pending = true;
	if (cancel_work_sync(&display->hotplug.poll_init_work))
		was_pending = true;
	if (cancel_delayed_work_sync(&display->hotplug.reenable_work))
		was_pending = true;

	return was_pending;
}

void intel_hpd_cancel_work(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	spin_lock_irq(&display->irq.lock);

	drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));

	display->hotplug.long_hpd_pin_mask = 0;
	display->hotplug.short_hpd_pin_mask = 0;
	display->hotplug.event_bits = 0;
	display->hotplug.retry_bits = 0;

	spin_unlock_irq(&display->irq.lock);

	cancel_work_sync(&display->hotplug.dig_port_work);

	/*
	 * All other work triggered by hotplug events should be canceled by
	 * now.
	 */
	if (cancel_all_detection_work(display))
		drm_dbg_kms(display->drm, "Hotplug detection work still active\n");
}

static void queue_work_for_missed_irqs(struct intel_display *display)
{
	struct intel_hotplug *hotplug = &display->hotplug;
	bool queue_hp_work = false;
	u32 blocked_hpd_pin_mask;
	enum hpd_pin pin;

	lockdep_assert_held(&display->irq.lock);

	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask)
		queue_hp_work = true;

	for_each_hpd_pin(pin) {
		switch (display->hotplug.stats[pin].state) {
		case HPD_MARK_DISABLED:
			queue_hp_work = true;
			break;
		case HPD_DISABLED:
		case HPD_ENABLED:
			break;
		default:
			MISSING_CASE(display->hotplug.stats[pin].state);
		}
	}

	if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask)
		queue_work(hotplug->dp_wq, &hotplug->dig_port_work);

	if (queue_hp_work)
		queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
}

static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_hotplug *hotplug = &display->hotplug;

	lockdep_assert_held(&display->irq.lock);

	hotplug->stats[pin].blocked_count++;

	return hotplug->stats[pin].blocked_count == 1;
}

static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_hotplug *hotplug = &display->hotplug;

	lockdep_assert_held(&display->irq.lock);

	if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0))
		return true;

	hotplug->stats[pin].blocked_count--;

	return hotplug->stats[pin].blocked_count == 0;
}

/**
 * intel_hpd_block - Block handling of HPD IRQs on an HPD pin
 * @encoder: Encoder to block the HPD handling for
 *
 * Blocks the handling of HPD IRQs on the HPD pin of @encoder.
 *
 * On return:
 *
 * - It's guaranteed that the blocked encoder's HPD pulse handler
 *   (via intel_digital_port::hpd_pulse()) is not running.
 * - The hotplug event handling (via intel_encoder::hotplug()) of an
 *   HPD IRQ pending at the time this function is called may still be
 *   running.
 * - Detection on the encoder's connector (via
 *   drm_connector_helper_funcs::detect_ctx(),
 *   drm_connector_funcs::detect()) remains allowed, for instance as part of
 *   userspace connector probing, or DRM core's connector polling.
 *
 * The call must be followed by calling intel_hpd_unblock() or
 * intel_hpd_clear_and_unblock().
 *
 * Note that the handling of HPD IRQs for another encoder using the same HPD
 * pin as that of @encoder will also be blocked.
 */
void intel_hpd_block(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_hotplug *hotplug = &display->hotplug;
	bool do_flush = false;

	if (encoder->hpd_pin == HPD_NONE)
		return;

	spin_lock_irq(&display->irq.lock);

	if (block_hpd_pin(display, encoder->hpd_pin))
		do_flush = true;

	spin_unlock_irq(&display->irq.lock);

	if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin))
		flush_work(&hotplug->dig_port_work);
}

/**
 * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin
 * @encoder: Encoder to unblock the HPD handling for
 *
 * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
 * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
 * HPD pin while it was blocked will be handled for @encoder and for any
 * other encoder sharing the same HPD pin.
 */
void intel_hpd_unblock(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	if (encoder->hpd_pin == HPD_NONE)
		return;

	spin_lock_irq(&display->irq.lock);

	if (unblock_hpd_pin(display, encoder->hpd_pin))
		queue_work_for_missed_irqs(display);

	spin_unlock_irq(&display->irq.lock);
}

/**
 * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin
 * @encoder: Encoder to unblock the HPD handling for
 *
 * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
 * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
 * HPD pin while it was blocked will be cleared, handling only new IRQs.
 */
void intel_hpd_clear_and_unblock(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_hotplug *hotplug = &display->hotplug;
	enum hpd_pin pin = encoder->hpd_pin;

	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&display->irq.lock);

	if (unblock_hpd_pin(display, pin)) {
		hotplug->event_bits &= ~BIT(pin);
		hotplug->retry_bits &= ~BIT(pin);
		hotplug->short_hpd_pin_mask &= ~BIT(pin);
		hotplug->long_hpd_pin_mask &= ~BIT(pin);
	}

	spin_unlock_irq(&display->irq.lock);
}
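
/*
 * Minimal usage sketch for the block/unblock API (a hypothetical caller;
 * the concrete users live in the encoder code): block HPD processing
 * around an operation that would otherwise race with the HPD pulse
 * handler, and discard any IRQs the operation itself provoked:
 *
 *	intel_hpd_block(encoder);
 *	reprogram_the_port(encoder);
 *	intel_hpd_clear_and_unblock(encoder);
 *
 * reprogram_the_port() is made up for this sketch. Use intel_hpd_unblock()
 * instead if IRQs raised while blocked must still be processed afterwards.
 */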

void intel_hpd_enable_detection_work(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	display->hotplug.detection_work_enabled = true;
	queue_work_for_missed_irqs(display);
	spin_unlock_irq(&display->irq.lock);
}

void intel_hpd_disable_detection_work(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	display->hotplug.detection_work_enabled = false;
	spin_unlock_irq(&display->irq.lock);

	cancel_all_detection_work(display);
}

bool intel_hpd_schedule_detection(struct intel_display *display)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&display->irq.lock, flags);
	ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
	spin_unlock_irqrestore(&display->irq.lock, flags);

	return ret;
}

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct intel_display *display = m->private;
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_hotplug *hotplug = &display->hotplug;

	/*
	 * Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet.
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&display->hotplug.dig_port_work);
	flush_delayed_work(&display->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   str_yes_no(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_display *display = m->private;
	struct intel_hotplug *hotplug = &display->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(display->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(display->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&display->irq.lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&display->irq.lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&display->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct intel_display *display = m->private;

	seq_printf(m, "Enabled: %s\n",
		   str_yes_no(display->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_display *display = m->private;
	struct intel_hotplug *hotplug = &display->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(display);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");

	spin_lock_irq(&display->irq.lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&display->irq.lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&display->hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};

void intel_hpd_debugfs_register(struct intel_display *display)
{
	struct drm_minor *minor = display->drm->primary;

	debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
			    display, &i915_hpd_storm_ctl_fops);
	debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
			    display, &i915_hpd_short_storm_ctl_fops);
	debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
			    &display->hotplug.ignore_long_hpd);
}
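
/*
 * Example debugfs usage from the shell (assuming the usual debugfs mount
 * point and DRM minor 0; adjust the path for the system at hand):
 *
 *	# lower the storm threshold for automated hotplug testing
 *	echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	# restore the platform default
 *	echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	# force-enable short IRQ storm detection
 *	echo 1 > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 */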