/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled in many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive amounts of interrupts and grinding
 * the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen while a DisplayPort sink is connected, hence on platforms whose DP
 * callback is handled by i915_digport_work_func(), re-enabling of HPD is not
 * performed (it was never expected to be disabled in the first place ;) ).
 * This is specific to DP sinks handled by that routine; any other display,
 * such as HDMI or DVI, enabled on the same port will have proper logic, since
 * it will use i915_hotplug_work_func() where this logic is handled.
 */

/**
 * intel_hpd_pin_default - return default pin associated with a certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get associated pin
 *
 * It is only valid and used by digital port encoders.
 *
 * Return pin that is associated with @port.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
				   enum port port)
{
	return HPD_PORT_A + port - PORT_A;
}

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD	50

#define HPD_STORM_DETECT_PERIOD		1000
#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
#define HPD_RETRY_DELAY			1000

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/*
	 * MST connectors get their encoder attached dynamically
	 * so need to make sure we have an encoder here. But since
	 * MST encoders have their hpd_pin set to HPD_NONE we don't
	 * have to special case them beyond that.
	 */
	return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 towards this threshold,
 * and short IRQs count as +1. If this threshold is exceeded, it's considered
 * an IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems
 * also suffer from short IRQ storms and must also track these. Because short
 * IRQ storms are naturally caused by sideband interactions with DP MST
 * devices, short IRQ detection is only enabled for systems without DP MST
 * support. Systems which are new enough to support DP MST are far less likely
 * to suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
				       enum hpd_pin pin, bool long_hpd)
{
	struct intel_hotplug *hpd = &dev_priv->display.hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
		return false;

	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(&dev_priv->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}

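/*
 * The hotplug detection work items (hotplug_work, poll_init_work and
 * reenable_work) may only be queued while detection_work_enabled is set.
 * The helpers below enforce that, and must be called with irq_lock held.
 */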
static bool detection_work_enabled(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->irq_lock);

	return i915->display.hotplug.detection_work_enabled;
}

static bool
mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
{
	lockdep_assert_held(&i915->irq_lock);

	if (!detection_work_enabled(i915))
		return false;

	return mod_delayed_work(i915->unordered_wq, work, delay);
}

static bool
queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
{
	lockdep_assert_held(&i915->irq_lock);

	if (!detection_work_enabled(i915))
		return false;

	return queue_delayed_work(i915->unordered_wq, work, delay);
}

static bool
queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
{
	lockdep_assert_held(&i915->irq_lock);

	if (!detection_work_enabled(i915))
		return false;

	return queue_work(i915->unordered_wq, work);
}

static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(&dev_priv->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_reschedule(&dev_priv->drm);
		mod_delayed_detection_work(dev_priv,
					   &dev_priv->display.hotplug.reenable_work,
					   msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}

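/*
 * Runs HPD_STORM_REENABLE_DELAY after an IRQ storm: restore connector->polled
 * to its original value, flip any HPD_DISABLED pins back to HPD_ENABLED and
 * reprogram the HPD interrupts.
 */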
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     display.hotplug.reenable_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	enum hpd_pin pin;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(&dev_priv->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
			dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
	}

	intel_hpd_irq_setup(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}

static enum intel_hotplug_state
intel_hotplug_detect_connector(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;
	u64 old_epoch_counter;
	int status;
	bool ret = false;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;
	old_epoch_counter = connector->base.epoch_counter;

	status = drm_helper_probe_detect(&connector->base, NULL, false);
	if (!connector->base.force)
		connector->base.status = status;

	if (old_epoch_counter != connector->base.epoch_counter)
		ret = true;

	if (ret) {
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.base.id,
			    connector->base.name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->base.status),
			    old_epoch_counter,
			    connector->base.epoch_counter);
		return INTEL_HOTPLUG_CHANGED;
	}
	return INTEL_HOTPLUG_UNCHANGED;
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
		      struct intel_connector *connector)
{
	return intel_hotplug_detect_connector(connector);
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

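/*
 * Bottom half for digital port HPD: hand the long/short pulse masks collected
 * by the interrupt handler to each encoder's ->hpd_pulse() hook. Pulses that
 * are not consumed (IRQ_NONE) fall back to regular hotplug processing.
 */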
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_encoder *encoder;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->display.hotplug.long_port_mask;
	dev_priv->display.hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->display.hotplug.short_port_mask;
	dev_priv->display.hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum port port = encoder->port;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_port_mask & BIT(port);
		short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(encoder->hpd_pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->display.hotplug.event_bits |= old_bits;
		queue_delayed_detection_work(dev_priv,
					     &dev_priv->display.hotplug.hotplug_work, 0);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	spin_lock_irq(&i915->irq_lock);
	i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
	spin_unlock_irq(&i915->irq_lock);

	queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.hotplug.hotplug_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;
	struct drm_connector *first_changed_connector = NULL;
	int changed_connectors = 0;

	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->display.hotplug.event_bits;
	dev_priv->display.hotplug.event_bits = 0;
	hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
	dev_priv->display.hotplug.retry_bits = 0;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* Skip calling encoder hotplug handlers if ignore long HPD is set */
	if (dev_priv->display.hotplug.ignore_long_hpd) {
		drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
		mutex_unlock(&dev_priv->drm.mode_config.mutex);
		return;
	}

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(&dev_priv->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				changed_connectors++;
				if (!first_changed_connector) {
					drm_connector_get(&connector->base);
					first_changed_connector = &connector->base;
				}
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	if (changed_connectors == 1)
		drm_kms_helper_connector_hotplug_event(first_changed_connector);
	else if (changed_connectors > 0)
		drm_kms_helper_hotplug_event(&dev_priv->drm);

	if (first_changed_connector)
		drm_connector_put(first_changed_connector);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->display.hotplug.retry_bits |= retry;

		mod_delayed_detection_work(dev_priv,
					   &dev_priv->display.hotplug.hotplug_work,
					   msecs_to_jiffies(HPD_RETRY_DELAY));
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum port port = encoder->port;
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(&dev_priv->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");
		queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			dev_priv->display.hotplug.long_port_mask |= BIT(port);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			dev_priv->display.hotplug.short_port_mask |= BIT(port);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			dev_priv->display.hotplug.event_bits |= BIT(pin);
			long_hpd = true;
			queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
			dev_priv->display.hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in our hotplug work.
	 */
	if (storm_detected)
		intel_hpd_irq_setup(dev_priv);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_detection_work(dev_priv,
					     &dev_priv->display.hotplug.hotplug_work, 0);

	spin_unlock(&dev_priv->irq_lock);
}

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	int i;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_hpd_pin(i) {
		dev_priv->display.hotplug.stats[i].count = 0;
		dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	intel_hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * Run detection on all connectors that rely on HPD (DRM_CONNECTOR_POLL_HPD)
 * and send the appropriate hotplug uevent if any of them changed state.
 */
static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	struct intel_connector *first_changed_connector = NULL;
	int changed = 0;

	mutex_lock(&i915->drm.mode_config.mutex);

	if (!i915->drm.mode_config.poll_enabled)
		goto out;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
			continue;

		if (intel_hotplug_detect_connector(connector) != INTEL_HOTPLUG_CHANGED)
			continue;

		changed++;

		if (changed == 1) {
			drm_connector_get(&connector->base);
			first_changed_connector = connector;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

out:
	mutex_unlock(&i915->drm.mode_config.mutex);

	if (!changed)
		return;

	if (changed == 1)
		drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
	else
		drm_kms_helper_hotplug_event(&i915->drm);

	drm_connector_put(&first_changed_connector->base);
}

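/*
 * Worker that switches connectors between HPD based detection and polling
 * according to hotplug.poll_enabled; it runs from process context so it can
 * safely take mode_config.mutex.
 */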
static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.hotplug.poll_init_work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	bool enabled;

	mutex_lock(&dev_priv->drm.mode_config.mutex);

	enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
	/*
	 * Prevent taking a power reference from this sequence of
	 * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
	 * connector detect which would requeue i915_hpd_poll_init_work()
	 * and so risk an endless loop of this same sequence.
	 */
	if (!enabled) {
		wakeref = intel_display_power_get(dev_priv,
						  POWER_DOMAIN_DISPLAY_CORE);
		drm_WARN_ON(&dev_priv->drm,
			    READ_ONCE(dev_priv->display.hotplug.poll_enabled));
		cancel_work(&dev_priv->display.hotplug.poll_init_work);
	}

	spin_lock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	spin_unlock_irq(&dev_priv->irq_lock);

	if (enabled)
		drm_kms_helper_poll_reschedule(&dev_priv->drm);

	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	/*
	 * We might have missed any hotplugs that happened while we were
	 * in the middle of disabling polling.
	 */
	if (!enabled) {
		i915_hpd_poll_detect_connectors(dev_priv);

		intel_display_power_put(dev_priv,
					POWER_DOMAIN_DISPLAY_CORE,
					wakeref);
	}
}

/**
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv) ||
	    !intel_display_device_enabled(dev_priv))
		return;

	WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker.
	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	queue_detection_work(dev_priv,
			     &dev_priv->display.hotplug.poll_init_work);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * intel_hpd_poll_disable - disable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function disables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also used during driver init to initialize connector->polled
 * appropriately for all connectors.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_enable().
 */
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);

	spin_lock_irq(&dev_priv->irq_lock);
	queue_detection_work(dev_priv,
			     &dev_priv->display.hotplug.poll_init_work);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void intel_hpd_init_early(struct drm_i915_private *i915)
{
	INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
			  i915_hotplug_work_func);
	INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);

	i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/*
	 * If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
}

static bool cancel_all_detection_work(struct drm_i915_private *i915)
{
	bool was_pending = false;

	if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
		was_pending = true;
	if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
		was_pending = true;
	if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
		was_pending = true;

	return was_pending;
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->display.hotplug.long_port_mask = 0;
	dev_priv->display.hotplug.short_port_mask = 0;
	dev_priv->display.hotplug.event_bits = 0;
	dev_priv->display.hotplug.retry_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);

	/*
	 * All other work triggered by hotplug events should be canceled by
	 * now.
	 */
	if (cancel_all_detection_work(dev_priv))
		drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
}

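/*
 * Disable further HPD processing on @pin; returns true if the pin was
 * previously enabled. Paired with intel_hpd_enable().
 */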
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	bool ret = false;

	if (pin == HPD_NONE)
		return false;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
		dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
		ret = true;
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
{
	bool queue_work = false;
	enum hpd_pin pin;

	lockdep_assert_held(&i915->irq_lock);

	if (i915->display.hotplug.event_bits ||
	    i915->display.hotplug.retry_bits)
		queue_work = true;

	for_each_hpd_pin(pin) {
		switch (i915->display.hotplug.stats[pin].state) {
		case HPD_MARK_DISABLED:
			queue_work = true;
			break;
		case HPD_ENABLED:
			break;
		default:
			MISSING_CASE(i915->display.hotplug.stats[pin].state);
		}
	}

	if (queue_work)
		queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
}

void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
{
	spin_lock_irq(&i915->irq_lock);
	i915->display.hotplug.detection_work_enabled = true;
	queue_work_for_missed_irqs(i915);
	spin_unlock_irq(&i915->irq_lock);
}

void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
{
	spin_lock_irq(&i915->irq_lock);
	i915->display.hotplug.detection_work_enabled = false;
	spin_unlock_irq(&i915->irq_lock);

	cancel_all_detection_work(i915);
}

bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&i915->irq_lock, flags);
	ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
	spin_unlock_irqrestore(&i915->irq_lock, flags);

	return ret;
}

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct intel_hotplug *hotplug = &dev_priv->display.hotplug;

	/*
	 * Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet.
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->display.hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   str_yes_no(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

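/*
 * debugfs i915_hpd_storm_ctl: writing "reset" restores the default storm
 * threshold, a number sets a new threshold, and 0 disables storm detection.
 */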
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->display.hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->display.hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};

void intel_hpd_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
			    i915, &i915_hpd_storm_ctl_fops);
	debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
			    i915, &i915_hpd_short_storm_ctl_fops);
	debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
			    &i915->display.hotplug.ignore_long_hpd);
}