1 /* 2 * Copyright © 2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24 #include <linux/debugfs.h> 25 #include <linux/kernel.h> 26 27 #include <drm/drm_probe_helper.h> 28 29 #include "i915_drv.h" 30 #include "i915_irq.h" 31 #include "intel_connector.h" 32 #include "intel_display_power.h" 33 #include "intel_display_core.h" 34 #include "intel_display_rpm.h" 35 #include "intel_display_types.h" 36 #include "intel_dp.h" 37 #include "intel_hdcp.h" 38 #include "intel_hotplug.h" 39 #include "intel_hotplug_irq.h" 40 41 /** 42 * DOC: Hotplug 43 * 44 * Simply put, hotplug occurs when a display is connected to or disconnected 45 * from the system. However, there may be adapters and docking stations and 46 * Display Port short pulses and MST devices involved, complicating matters. 47 * 48 * Hotplug in i915 is handled in many different levels of abstraction. 
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, the userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive amounts of interrupts and grinding
 * the system to a halt.
 *
 * Current implementation expects that hotplug interrupt storm will not be
 * seen when display port sink is connected, hence on platforms whose DP
 * callback is handled by i915_digport_work_func reenabling of hpd is not
 * performed (it was never expected to be disabled in the first place ;) )
 * this is specific to DP sinks handled by this routine and any other display
 * such as HDMI or DVI enabled on the same port will have proper logic since
 * it will use i915_hotplug_work_func where this logic is handled.
 */

/**
 * intel_hpd_pin_default - return default pin associated with certain port.
 * @port: the hpd port to get associated pin
 *
 * It is only valid and used by digital port encoder.
 *
 * Return pin that is associated with @port.
 */
enum hpd_pin intel_hpd_pin_default(enum port port)
{
	/* HPD pins and ports are laid out in the same consecutive order. */
	return HPD_PORT_A + port - PORT_A;
}

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50

/* Window over which HPD IRQs are counted for storm detection, in ms. */
#define HPD_STORM_DETECT_PERIOD 1000
/* Delay before re-enabling HPD after a storm forced polling, in ms. */
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
/* Delay before retrying a detect that returned INTEL_HOTPLUG_RETRY, in ms. */
#define HPD_RETRY_DELAY 1000

/* Return the HPD pin of the connector's attached encoder, or HPD_NONE. */
static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/*
	 * MST connectors get their encoder attached dynamically
	 * so need to make sure we have an encoder here. But since
	 * MST encoders have their hpd_pin set to HPD_NONE we don't
	 * have to special case them beyond that.
	 */
	return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @display: display device
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms.
 * Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @display->hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &display->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct intel_display *display,
				       enum hpd_pin pin, bool long_hpd)
{
	struct intel_hotplug *hpd = &display->hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	/* Storm detection disabled, or short IRQs not tracked on this system. */
	if (!threshold ||
	    (!long_hpd && !display->hotplug.hpd_short_storm_enabled))
		return false;

	/* Detection window expired: restart the count from now. */
	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(display->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(display->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}

/* Whether HPD detection work may currently be queued (irq.lock held). */
static bool detection_work_enabled(struct intel_display *display)
{
	lockdep_assert_held(&display->irq.lock);

	return display->hotplug.detection_work_enabled;
}

/* mod_delayed_work() wrapper; a no-op while detection work is disabled. */
static bool
mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return mod_delayed_work(display->wq.unordered, work, delay);
}

/* queue_delayed_work() wrapper; a no-op while detection work is disabled. */
static bool
queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return queue_delayed_work(display->wq.unordered, work, delay);
}

/* queue_work() wrapper; a no-op while detection work is disabled. */
static bool
queue_detection_work(struct intel_display *display, struct work_struct *work)
{
	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return queue_work(display->wq.unordered, work);
}

/*
 * Switch connectors whose pin was marked HPD_MARK_DISABLED by storm
 * detection over to polling, and schedule the delayed HPD re-enable work.
 */
static void
intel_hpd_irq_storm_switch_to_polling(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    display->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(display->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		display->hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_reschedule(display->drm);
		mod_delayed_detection_work(display,
					   &display->hotplug.reenable_work,
					   msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}

/*
 * Delayed work that re-enables HPD on pins disabled by storm detection,
 * restoring each affected connector's original polling mode.
 */
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), hotplug.reenable_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	struct ref_tracker *wakeref;
	enum hpd_pin pin;

	wakeref = intel_display_rpm_get(display);

	spin_lock_irq(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    display->hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(display->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (display->hotplug.stats[pin].state == HPD_DISABLED)
			display->hotplug.stats[pin].state = HPD_ENABLED;
	}

	intel_hpd_irq_setup(display);

	spin_unlock_irq(&display->irq.lock);

	intel_display_rpm_put(display, wakeref);
}

/*
 * Run detection on a single connector and report whether its state
 * changed, judged by the connector's epoch counter.
 */
static enum intel_hotplug_state
intel_hotplug_detect_connector(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;
	u64 old_epoch_counter;
	int status;
	bool ret = false;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;
	old_epoch_counter = connector->base.epoch_counter;

	status = drm_helper_probe_detect(&connector->base, NULL, false);
	/* A user-forced status overrides whatever detection found. */
	if (!connector->base.force)
		connector->base.status = status;

	if (old_epoch_counter != connector->base.epoch_counter)
		ret = true;

	if (ret) {
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.base.id,
			    connector->base.name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->base.status),
			    old_epoch_counter,
			    connector->base.epoch_counter);
		return INTEL_HOTPLUG_CHANGED;
	}
	return INTEL_HOTPLUG_UNCHANGED;
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
		      struct intel_connector *connector)
{
	return intel_hotplug_detect_connector(connector);
}

/* True if the encoder is a digital port with an ->hpd_pulse() handler. */
static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

/* True if any encoder on @pin has an ->hpd_pulse() handler. */
static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(display->drm, encoder) {
		if (encoder->hpd_pin != pin)
			continue;

		if (intel_encoder_has_hpd_pulse(encoder))
			return true;
	}

	return false;
}

/* True if HPD processing on @pin is currently blocked (irq.lock held). */
static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin)
{
	lockdep_assert_held(&display->irq.lock);

	return display->hotplug.stats[pin].blocked_count;
}

/* Bitmask of all HPD pins that are currently blocked. */
static u32 get_blocked_hpd_pin_mask(struct intel_display *display)
{
	enum hpd_pin pin;
	u32 hpd_pin_mask = 0;

	for_each_hpd_pin(pin) {
		if (hpd_pin_is_blocked(display, pin))
			hpd_pin_mask |= BIT(pin);
	}

	return hpd_pin_mask;
}

/*
 * Digital port bottom half: dispatch pending long/short HPD pulses to the
 * encoders' ->hpd_pulse() hooks, falling back to the regular hotplug work
 * for pulses the hook did not handle (IRQ_NONE).
 */
static void i915_digport_work_func(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, struct intel_display, hotplug.dig_port_work);
	struct intel_hotplug *hotplug = &display->hotplug;
	u32 long_hpd_pin_mask, short_hpd_pin_mask;
	struct intel_encoder *encoder;
	u32 blocked_hpd_pin_mask;
	u32 old_bits = 0;

	spin_lock_irq(&display->irq.lock);

	/* Claim all pending non-blocked pulses; blocked ones stay queued. */
	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask;
	hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask;
	short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask;
	hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask;

	spin_unlock_irq(&display->irq.lock);

	for_each_intel_encoder(display->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum hpd_pin pin = encoder->hpd_pin;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_hpd_pin_mask & BIT(pin);
		short_hpd = short_hpd_pin_mask & BIT(pin);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&display->irq.lock);
		display->hotplug.event_bits |= old_bits;
		queue_delayed_detection_work(display,
					     &display->hotplug.hotplug_work, 0);
		spin_unlock_irq(&display->irq.lock);
	}
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_hotplug *hotplug = &display->hotplug;
	struct intel_encoder *encoder = &dig_port->base;

	spin_lock_irq(&display->irq.lock);

	hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin);
	if (!hpd_pin_is_blocked(display, encoder->hpd_pin))
		queue_work(hotplug->dp_wq, &hotplug->dig_port_work);

	spin_unlock_irq(&display->irq.lock);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, struct intel_display, hotplug.hotplug_work.work);
	struct intel_hotplug *hotplug = &display->hotplug;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;
	struct drm_connector *first_changed_connector = NULL;
	int changed_connectors = 0;
	u32 blocked_hpd_pin_mask;

	mutex_lock(&display->drm->mode_config.mutex);
	drm_dbg_kms(display->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&display->irq.lock);

	/* Claim pending event/retry bits for non-blocked pins only. */
	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask;
	hotplug->event_bits &= ~hpd_event_bits;
	hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask;
	hotplug->retry_bits &= ~hpd_retry_bits;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(display);

	spin_unlock_irq(&display->irq.lock);

	/* Skip calling encode hotplug handlers if ignore long HPD set*/
	if (display->hotplug.ignore_long_hpd) {
		drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
		mutex_unlock(&display->drm->mode_config.mutex);
		return;
	}

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			/* A fresh event resets the connector's retry count. */
			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(display->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				changed_connectors++;
				if (!first_changed_connector) {
					drm_connector_get(&connector->base);
					first_changed_connector = &connector->base;
				}
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&display->drm->mode_config.mutex);

	/* One connector changed: send the fine-grained per-connector event. */
	if (changed_connectors == 1)
		drm_kms_helper_connector_hotplug_event(first_changed_connector);
	else if (changed_connectors > 0)
		drm_kms_helper_hotplug_event(display->drm);

	if (first_changed_connector)
		drm_connector_put(first_changed_connector);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&display->irq.lock);
		display->hotplug.retry_bits |= retry;

		mod_delayed_detection_work(display,
					   &display->hotplug.hotplug_work,
					   msecs_to_jiffies(HPD_RETRY_DELAY));
		spin_unlock_irq(&display->irq.lock);
	}
}


/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @display: display device
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct intel_display *display,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&display->irq.lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only the one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(display->drm, encoder) {
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(display->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");

		if (!hpd_pin_is_blocked(display, pin))
			queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			display->hotplug.long_hpd_pin_mask |= BIT(pin);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			display->hotplug.short_hpd_pin_mask |= BIT(pin);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (display->hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(display->drm, !HAS_GMCH(display),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (display->hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			display->hotplug.event_bits |= BIT(pin);
			long_hpd = true;

			if (!hpd_pin_is_blocked(display, pin))
				queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) {
			display->hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in our hotplug work.
	 */
	if (storm_detected)
		intel_hpd_irq_setup(display);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_detection_work(display,
					     &display->hotplug.hotplug_work, 0);

	spin_unlock(&display->irq.lock);
}

/**
 * intel_hpd_init - initializes and enables hpd support
 * @display: display device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll request can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
 */
void intel_hpd_init(struct intel_display *display)
{
	int i;

	if (!HAS_DISPLAY(display))
		return;

	/* Reset per-pin storm stats and mark every pin enabled. */
	for_each_hpd_pin(i) {
		display->hotplug.stats[i].count = 0;
		display->hotplug.stats[i].state = HPD_ENABLED;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irq(&display->irq.lock);
	intel_hpd_irq_setup(display);
	spin_unlock_irq(&display->irq.lock);
}

/*
 * Detect all connectors currently relying on HPD polling, and send the
 * appropriate hotplug uevent if any of them changed state.
 */
static void i915_hpd_poll_detect_connectors(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	struct intel_connector *first_changed_connector = NULL;
	int changed = 0;

	mutex_lock(&display->drm->mode_config.mutex);

	if (!display->drm->mode_config.poll_enabled)
		goto out;

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
			continue;

		if (intel_hotplug_detect_connector(connector) != INTEL_HOTPLUG_CHANGED)
			continue;

		changed++;

		/* Keep a reference for the single-connector event below. */
		if (changed == 1) {
			drm_connector_get(&connector->base);
			first_changed_connector = connector;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

out:
	mutex_unlock(&display->drm->mode_config.mutex);

	if (!changed)
		return;

	if (changed == 1)
		drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
	else
		drm_kms_helper_hotplug_event(display->drm);

	drm_connector_put(&first_changed_connector->base);
}

/*
 * Worker applying the current hotplug.poll_enabled setting to all
 * connectors' polling flags, run from a context where taking
 * mode_config.mutex is safe.
 */
static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), hotplug.poll_init_work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	bool enabled;

	mutex_lock(&display->drm->mode_config.mutex);

	enabled = READ_ONCE(display->hotplug.poll_enabled);
	/*
	 * Prevent taking a power reference from this sequence of
	 * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
	 * connector detect which would requeue i915_hpd_poll_init_work()
	 * and so risk an endless loop of this same sequence.
	 */
	if (!enabled) {
		wakeref = intel_display_power_get(display,
						  POWER_DOMAIN_DISPLAY_CORE);
		drm_WARN_ON(display->drm,
			    READ_ONCE(display->hotplug.poll_enabled));
		cancel_work(&display->hotplug.poll_init_work);
	}

	spin_lock_irq(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		/* Storm-disabled pins keep their forced polling mode. */
		if (display->hotplug.stats[pin].state == HPD_DISABLED)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	spin_unlock_irq(&display->irq.lock);

	if (enabled)
		drm_kms_helper_poll_reschedule(display->drm);

	mutex_unlock(&display->drm->mode_config.mutex);

	/*
	 * We might have missed any hotplugs that happened while we were
	 * in the middle of disabling polling
	 */
	if (!enabled) {
		i915_hpd_poll_detect_connectors(display);

		intel_display_power_put(display,
					POWER_DOMAIN_DISPLAY_CORE,
					wakeref);
	}
}

/**
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @display: display device instance
 *
 * This function enables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct intel_display *display)
{
	if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display))
		return;

	WRITE_ONCE(display->hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker
	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway
	 */
	spin_lock_irq(&display->irq.lock);
	queue_detection_work(display,
			     &display->hotplug.poll_init_work);
	spin_unlock_irq(&display->irq.lock);
}

/**
 * intel_hpd_poll_disable - disable polling for connectors with hpd
 * @display: display device instance
 *
 * This function disables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also used during driver init to initialize connector->polled
 * appropriately for all connectors.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_enable().
 */
void intel_hpd_poll_disable(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	/* Force a DPCD probe on the next detect of each DP encoder. */
	for_each_intel_dp(display->drm, encoder)
		intel_dp_dpcd_set_probe(enc_to_intel_dp(encoder), true);

	WRITE_ONCE(display->hotplug.poll_enabled, false);

	spin_lock_irq(&display->irq.lock);
	queue_detection_work(display,
			     &display->hotplug.poll_init_work);
	spin_unlock_irq(&display->irq.lock);
}

void intel_hpd_poll_fini(struct intel_display *display)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		intel_connector_cancel_modeset_retry_work(connector);
		intel_hdcp_cancel_works(connector);
	}
	drm_connector_list_iter_end(&conn_iter);
}

/* Early (pre-irq-install) init of hotplug work items and storm defaults. */
void intel_hpd_init_early(struct intel_display *display)
{
	INIT_DELAYED_WORK(&display->hotplug.hotplug_work,
			  i915_hotplug_work_func);
	INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&display->hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);

	display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display);
}

/* Cancel all detection work items; returns true if any was still pending. */
static bool cancel_all_detection_work(struct intel_display *display)
{
	bool was_pending = false;

	if (cancel_delayed_work_sync(&display->hotplug.hotplug_work))
		was_pending = true;
	if (cancel_work_sync(&display->hotplug.poll_init_work))
		was_pending = true;
	if (cancel_delayed_work_sync(&display->hotplug.reenable_work))
		was_pending = true;

	return was_pending;
}

void intel_hpd_cancel_work(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	spin_lock_irq(&display->irq.lock);

	/* No pin should still be blocked this late in teardown. */
	drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));

	display->hotplug.long_hpd_pin_mask = 0;
	display->hotplug.short_hpd_pin_mask = 0;
	display->hotplug.event_bits = 0;
	display->hotplug.retry_bits = 0;

	spin_unlock_irq(&display->irq.lock);

	cancel_work_sync(&display->hotplug.dig_port_work);

	/*
	 * All other work triggered by hotplug events should be canceled by
	 * now.
	 */
	if (cancel_all_detection_work(display))
		drm_dbg_kms(display->drm, "Hotplug detection work still active\n");
}

/*
 * Requeue the dig port and/or hotplug work for any events that arrived
 * while their pin was blocked or detection work was disabled.
 */
static void queue_work_for_missed_irqs(struct intel_display *display)
{
	struct intel_hotplug *hotplug = &display->hotplug;
	bool queue_hp_work = false;
	u32 blocked_hpd_pin_mask;
	enum hpd_pin pin;

	lockdep_assert_held(&display->irq.lock);

	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask)
		queue_hp_work = true;

	for_each_hpd_pin(pin) {
		switch (display->hotplug.stats[pin].state) {
		case HPD_MARK_DISABLED:
			queue_hp_work = true;
			break;
		case HPD_DISABLED:
		case HPD_ENABLED:
			break;
		default:
			MISSING_CASE(display->hotplug.stats[pin].state);
		}
	}

	if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask)
		queue_work(hotplug->dp_wq, &hotplug->dig_port_work);

	if (queue_hp_work)
		queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
}

/* Increment the pin's block count; returns true on the 0 -> 1 transition. */
static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_hotplug *hotplug = &display->hotplug;

	lockdep_assert_held(&display->irq.lock);

	hotplug->stats[pin].blocked_count++;

	return hotplug->stats[pin].blocked_count == 1;
}

/* Decrement the pin's block count; returns true on the 1 -> 0 transition. */
static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_hotplug *hotplug = &display->hotplug;

	lockdep_assert_held(&display->irq.lock);

	if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0))
		return true;

	hotplug->stats[pin].blocked_count--;

	return hotplug->stats[pin].blocked_count == 0;
}

/**
 * intel_hpd_block - Block handling of HPD IRQs on an HPD pin
 * @encoder: Encoder to block the HPD handling for
* 1055 * Blocks the handling of HPD IRQs on the HPD pin of @encoder. 1056 * 1057 * On return: 1058 * 1059 * - It's guaranteed that the blocked encoders' HPD pulse handler 1060 * (via intel_digital_port::hpd_pulse()) is not running. 1061 * - The hotplug event handling (via intel_encoder::hotplug()) of an 1062 * HPD IRQ pending at the time this function is called may be still 1063 * running. 1064 * - Detection on the encoder's connector (via 1065 * drm_connector_helper_funcs::detect_ctx(), 1066 * drm_connector_funcs::detect()) remains allowed, for instance as part of 1067 * userspace connector probing, or DRM core's connector polling. 1068 * 1069 * The call must be followed by calling intel_hpd_unblock(), or 1070 * intel_hpd_clear_and_unblock(). 1071 * 1072 * Note that the handling of HPD IRQs for another encoder using the same HPD 1073 * pin as that of @encoder will be also blocked. 1074 */ 1075 void intel_hpd_block(struct intel_encoder *encoder) 1076 { 1077 struct intel_display *display = to_intel_display(encoder); 1078 struct intel_hotplug *hotplug = &display->hotplug; 1079 bool do_flush = false; 1080 1081 if (encoder->hpd_pin == HPD_NONE) 1082 return; 1083 1084 spin_lock_irq(&display->irq.lock); 1085 1086 if (block_hpd_pin(display, encoder->hpd_pin)) 1087 do_flush = true; 1088 1089 spin_unlock_irq(&display->irq.lock); 1090 1091 if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin)) 1092 flush_work(&hotplug->dig_port_work); 1093 } 1094 1095 /** 1096 * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin 1097 * @encoder: Encoder to unblock the HPD handling for 1098 * 1099 * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was 1100 * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the 1101 * HPD pin while it was blocked will be handled for @encoder and for any 1102 * other encoder sharing the same HPD pin. 
1103 */ 1104 void intel_hpd_unblock(struct intel_encoder *encoder) 1105 { 1106 struct intel_display *display = to_intel_display(encoder); 1107 1108 if (encoder->hpd_pin == HPD_NONE) 1109 return; 1110 1111 spin_lock_irq(&display->irq.lock); 1112 1113 if (unblock_hpd_pin(display, encoder->hpd_pin)) 1114 queue_work_for_missed_irqs(display); 1115 1116 spin_unlock_irq(&display->irq.lock); 1117 } 1118 1119 /** 1120 * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin 1121 * @encoder: Encoder to unblock the HPD handling for 1122 * 1123 * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was 1124 * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the 1125 * HPD pin while it was blocked will be cleared, handling only new IRQs. 1126 */ 1127 void intel_hpd_clear_and_unblock(struct intel_encoder *encoder) 1128 { 1129 struct intel_display *display = to_intel_display(encoder); 1130 struct intel_hotplug *hotplug = &display->hotplug; 1131 enum hpd_pin pin = encoder->hpd_pin; 1132 1133 if (pin == HPD_NONE) 1134 return; 1135 1136 spin_lock_irq(&display->irq.lock); 1137 1138 if (unblock_hpd_pin(display, pin)) { 1139 hotplug->event_bits &= ~BIT(pin); 1140 hotplug->retry_bits &= ~BIT(pin); 1141 hotplug->short_hpd_pin_mask &= ~BIT(pin); 1142 hotplug->long_hpd_pin_mask &= ~BIT(pin); 1143 } 1144 1145 spin_unlock_irq(&display->irq.lock); 1146 } 1147 1148 void intel_hpd_enable_detection_work(struct intel_display *display) 1149 { 1150 spin_lock_irq(&display->irq.lock); 1151 display->hotplug.detection_work_enabled = true; 1152 queue_work_for_missed_irqs(display); 1153 spin_unlock_irq(&display->irq.lock); 1154 } 1155 1156 void intel_hpd_disable_detection_work(struct intel_display *display) 1157 { 1158 spin_lock_irq(&display->irq.lock); 1159 display->hotplug.detection_work_enabled = false; 1160 spin_unlock_irq(&display->irq.lock); 1161 1162 cancel_all_detection_work(display); 1163 } 1164 1165 bool intel_hpd_schedule_detection(struct 
intel_display *display) 1166 { 1167 unsigned long flags; 1168 bool ret; 1169 1170 spin_lock_irqsave(&display->irq.lock, flags); 1171 ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0); 1172 spin_unlock_irqrestore(&display->irq.lock, flags); 1173 1174 return ret; 1175 } 1176 1177 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) 1178 { 1179 struct intel_display *display = m->private; 1180 struct drm_i915_private *dev_priv = to_i915(display->drm); 1181 struct intel_hotplug *hotplug = &display->hotplug; 1182 1183 /* Synchronize with everything first in case there's been an HPD 1184 * storm, but we haven't finished handling it in the kernel yet 1185 */ 1186 intel_synchronize_irq(dev_priv); 1187 flush_work(&display->hotplug.dig_port_work); 1188 flush_delayed_work(&display->hotplug.hotplug_work); 1189 1190 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); 1191 seq_printf(m, "Detected: %s\n", 1192 str_yes_no(delayed_work_pending(&hotplug->reenable_work))); 1193 1194 return 0; 1195 } 1196 1197 static ssize_t i915_hpd_storm_ctl_write(struct file *file, 1198 const char __user *ubuf, size_t len, 1199 loff_t *offp) 1200 { 1201 struct seq_file *m = file->private_data; 1202 struct intel_display *display = m->private; 1203 struct intel_hotplug *hotplug = &display->hotplug; 1204 unsigned int new_threshold; 1205 int i; 1206 char *newline; 1207 char tmp[16]; 1208 1209 if (len >= sizeof(tmp)) 1210 return -EINVAL; 1211 1212 if (copy_from_user(tmp, ubuf, len)) 1213 return -EFAULT; 1214 1215 tmp[len] = '\0'; 1216 1217 /* Strip newline, if any */ 1218 newline = strchr(tmp, '\n'); 1219 if (newline) 1220 *newline = '\0'; 1221 1222 if (strcmp(tmp, "reset") == 0) 1223 new_threshold = HPD_STORM_DEFAULT_THRESHOLD; 1224 else if (kstrtouint(tmp, 10, &new_threshold) != 0) 1225 return -EINVAL; 1226 1227 if (new_threshold > 0) 1228 drm_dbg_kms(display->drm, 1229 "Setting HPD storm detection threshold to %d\n", 1230 new_threshold); 1231 else 
1232 drm_dbg_kms(display->drm, "Disabling HPD storm detection\n"); 1233 1234 spin_lock_irq(&display->irq.lock); 1235 hotplug->hpd_storm_threshold = new_threshold; 1236 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 1237 for_each_hpd_pin(i) 1238 hotplug->stats[i].count = 0; 1239 spin_unlock_irq(&display->irq.lock); 1240 1241 /* Re-enable hpd immediately if we were in an irq storm */ 1242 flush_delayed_work(&display->hotplug.reenable_work); 1243 1244 return len; 1245 } 1246 1247 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) 1248 { 1249 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); 1250 } 1251 1252 static const struct file_operations i915_hpd_storm_ctl_fops = { 1253 .owner = THIS_MODULE, 1254 .open = i915_hpd_storm_ctl_open, 1255 .read = seq_read, 1256 .llseek = seq_lseek, 1257 .release = single_release, 1258 .write = i915_hpd_storm_ctl_write 1259 }; 1260 1261 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) 1262 { 1263 struct intel_display *display = m->private; 1264 1265 seq_printf(m, "Enabled: %s\n", 1266 str_yes_no(display->hotplug.hpd_short_storm_enabled)); 1267 1268 return 0; 1269 } 1270 1271 static int 1272 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file) 1273 { 1274 return single_open(file, i915_hpd_short_storm_ctl_show, 1275 inode->i_private); 1276 } 1277 1278 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, 1279 const char __user *ubuf, 1280 size_t len, loff_t *offp) 1281 { 1282 struct seq_file *m = file->private_data; 1283 struct intel_display *display = m->private; 1284 struct intel_hotplug *hotplug = &display->hotplug; 1285 char *newline; 1286 char tmp[16]; 1287 int i; 1288 bool new_state; 1289 1290 if (len >= sizeof(tmp)) 1291 return -EINVAL; 1292 1293 if (copy_from_user(tmp, ubuf, len)) 1294 return -EFAULT; 1295 1296 tmp[len] = '\0'; 1297 1298 /* Strip newline, if any */ 1299 newline = strchr(tmp, '\n'); 1300 if 
(newline) 1301 *newline = '\0'; 1302 1303 /* Reset to the "default" state for this system */ 1304 if (strcmp(tmp, "reset") == 0) 1305 new_state = !HAS_DP_MST(display); 1306 else if (kstrtobool(tmp, &new_state) != 0) 1307 return -EINVAL; 1308 1309 drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n", 1310 new_state ? "En" : "Dis"); 1311 1312 spin_lock_irq(&display->irq.lock); 1313 hotplug->hpd_short_storm_enabled = new_state; 1314 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 1315 for_each_hpd_pin(i) 1316 hotplug->stats[i].count = 0; 1317 spin_unlock_irq(&display->irq.lock); 1318 1319 /* Re-enable hpd immediately if we were in an irq storm */ 1320 flush_delayed_work(&display->hotplug.reenable_work); 1321 1322 return len; 1323 } 1324 1325 static const struct file_operations i915_hpd_short_storm_ctl_fops = { 1326 .owner = THIS_MODULE, 1327 .open = i915_hpd_short_storm_ctl_open, 1328 .read = seq_read, 1329 .llseek = seq_lseek, 1330 .release = single_release, 1331 .write = i915_hpd_short_storm_ctl_write, 1332 }; 1333 1334 void intel_hpd_debugfs_register(struct intel_display *display) 1335 { 1336 struct drm_minor *minor = display->drm->primary; 1337 1338 debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root, 1339 display, &i915_hpd_storm_ctl_fops); 1340 debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, 1341 display, &i915_hpd_short_storm_ctl_fops); 1342 debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, 1343 &display->hotplug.ignore_long_hpd); 1344 } 1345