1 /* 2 * Copyright © 2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24 #include <linux/debugfs.h> 25 #include <linux/kernel.h> 26 27 #include <drm/drm_probe_helper.h> 28 29 #include "i915_drv.h" 30 #include "i915_irq.h" 31 #include "intel_display_power.h" 32 #include "intel_display_types.h" 33 #include "intel_hotplug.h" 34 #include "intel_hotplug_irq.h" 35 36 /** 37 * DOC: Hotplug 38 * 39 * Simply put, hotplug occurs when a display is connected to or disconnected 40 * from the system. However, there may be adapters and docking stations and 41 * Display Port short pulses and MST devices involved, complicating matters. 42 * 43 * Hotplug in i915 is handled in many different levels of abstraction. 44 * 45 * The platform dependent interrupt handling code in i915_irq.c enables, 46 * disables, and does preliminary handling of the interrupts. The interrupt 47 * handlers gather the hotplug detect (HPD) information from relevant registers 48 * into a platform independent mask of hotplug pins that have fired. 49 * 50 * The platform independent interrupt handler intel_hpd_irq_handler() in 51 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes 52 * further processing to appropriate bottom halves (Display Port specific and 53 * regular hotplug). 54 * 55 * The Display Port work function i915_digport_work_func() calls into 56 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long 57 * pulses, with failures and non-MST long pulses triggering regular hotplug 58 * processing on the connector. 59 * 60 * The regular hotplug work function i915_hotplug_work_func() calls connector 61 * detect hooks, and, if connector status changes, triggers sending of hotplug 62 * uevent to userspace via drm_kms_helper_hotplug_event(). 63 * 64 * Finally, the userspace is responsible for triggering a modeset upon receiving 65 * the hotplug uevent, disabling or enabling the crtc as needed. 66 * 67 * The hotplug interrupt storm detection and mitigation code keeps track of the 68 * number of interrupts per hotplug pin per a period of time, and if the number 69 * of interrupts exceeds a certain threshold, the interrupt is disabled for a 70 * while before being re-enabled. The intention is to mitigate issues raising 71 * from broken hardware triggering massive amounts of interrupts and grinding 72 * the system to a halt. 73 * 74 * Current implementation expects that hotplug interrupt storm will not be 75 * seen when display port sink is connected, hence on platforms whose DP 76 * callback is handled by i915_digport_work_func reenabling of hpd is not 77 * performed (it was never expected to be disabled in the first place ;) ) 78 * this is specific to DP sinks handled by this routine and any other display 79 * such as HDMI or DVI enabled on the same port will have proper logic since 80 * it will use i915_hotplug_work_func where this logic is handled. 81 */ 82 83 /** 84 * intel_hpd_pin_default - return default pin associated with certain port. 85 * @dev_priv: private driver data pointer 86 * @port: the hpd port to get associated pin 87 * 88 * It is only valid and used by digital port encoder. 89 * 90 * Return pin that is associatade with @port. 91 */ 92 enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, 93 enum port port) 94 { 95 return HPD_PORT_A + port - PORT_A; 96 } 97 98 /* Threshold == 5 for long IRQs, 50 for short */ 99 #define HPD_STORM_DEFAULT_THRESHOLD 50 100 101 #define HPD_STORM_DETECT_PERIOD 1000 102 #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) 103 #define HPD_RETRY_DELAY 1000 104 105 static enum hpd_pin 106 intel_connector_hpd_pin(struct intel_connector *connector) 107 { 108 struct intel_encoder *encoder = intel_attached_encoder(connector); 109 110 /* 111 * MST connectors get their encoder attached dynamically 112 * so need to make sure we have an encoder here. But since 113 * MST encoders have their hpd_pin set to HPD_NONE we don't 114 * have to special case them beyond that. 115 */ 116 return encoder ? encoder->hpd_pin : HPD_NONE; 117 } 118 119 /** 120 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin 121 * @dev_priv: private driver data pointer 122 * @pin: the pin to gather stats on 123 * @long_hpd: whether the HPD IRQ was long or short 124 * 125 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ 126 * storms. Only the pin specific stats and state are changed, the caller is 127 * responsible for further action. 128 * 129 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is 130 * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to 131 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and 132 * short IRQs count as +1. If this threshold is exceeded, it's considered an 133 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED. 134 * 135 * By default, most systems will only count long IRQs towards 136 * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also 137 * suffer from short IRQ storms and must also track these. Because short IRQ 138 * storms are naturally caused by sideband interactions with DP MST devices, 139 * short IRQ detection is only enabled for systems without DP MST support. 140 * Systems which are new enough to support DP MST are far less likely to 141 * suffer from IRQ storms at all, so this is fine. 142 * 143 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs, 144 * and should only be adjusted for automated hotplug testing. 145 * 146 * Return true if an IRQ storm was detected on @pin. 147 */ 148 static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, 149 enum hpd_pin pin, bool long_hpd) 150 { 151 struct intel_hotplug *hpd = &dev_priv->display.hotplug; 152 unsigned long start = hpd->stats[pin].last_jiffies; 153 unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); 154 const int increment = long_hpd ? 10 : 1; 155 const int threshold = hpd->hpd_storm_threshold; 156 bool storm = false; 157 158 if (!threshold || 159 (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled)) 160 return false; 161 162 if (!time_in_range(jiffies, start, end)) { 163 hpd->stats[pin].last_jiffies = jiffies; 164 hpd->stats[pin].count = 0; 165 } 166 167 hpd->stats[pin].count += increment; 168 if (hpd->stats[pin].count > threshold) { 169 hpd->stats[pin].state = HPD_MARK_DISABLED; 170 drm_dbg_kms(&dev_priv->drm, 171 "HPD interrupt storm detected on PIN %d\n", pin); 172 storm = true; 173 } else { 174 drm_dbg_kms(&dev_priv->drm, 175 "Received HPD interrupt on PIN %d - cnt: %d\n", 176 pin, 177 hpd->stats[pin].count); 178 } 179 180 return storm; 181 } 182 183 static bool detection_work_enabled(struct drm_i915_private *i915) 184 { 185 lockdep_assert_held(&i915->irq_lock); 186 187 return i915->display.hotplug.detection_work_enabled; 188 } 189 190 static bool 191 mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay) 192 { 193 lockdep_assert_held(&i915->irq_lock); 194 195 if (!detection_work_enabled(i915)) 196 return false; 197 198 return mod_delayed_work(i915->unordered_wq, work, delay); 199 } 200 201 static bool 202 queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay) 203 { 204 lockdep_assert_held(&i915->irq_lock); 205 206 if (!detection_work_enabled(i915)) 207 return false; 208 209 return queue_delayed_work(i915->unordered_wq, work, delay); 210 } 211 212 static bool 213 queue_detection_work(struct drm_i915_private *i915, struct work_struct *work) 214 { 215 lockdep_assert_held(&i915->irq_lock); 216 217 if (!detection_work_enabled(i915)) 218 return false; 219 220 return queue_work(i915->unordered_wq, work); 221 } 222 223 static void 224 intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) 225 { 226 struct drm_connector_list_iter conn_iter; 227 struct intel_connector *connector; 228 bool hpd_disabled = false; 229 230 lockdep_assert_held(&dev_priv->irq_lock); 231 232 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 233 for_each_intel_connector_iter(connector, &conn_iter) { 234 enum hpd_pin pin; 235 236 if (connector->base.polled != DRM_CONNECTOR_POLL_HPD) 237 continue; 238 239 pin = intel_connector_hpd_pin(connector); 240 if (pin == HPD_NONE || 241 dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED) 242 continue; 243 244 drm_info(&dev_priv->drm, 245 "HPD interrupt storm detected on connector %s: " 246 "switching from hotplug detection to polling\n", 247 connector->base.name); 248 249 dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; 250 connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | 251 DRM_CONNECTOR_POLL_DISCONNECT; 252 hpd_disabled = true; 253 } 254 drm_connector_list_iter_end(&conn_iter); 255 256 /* Enable polling and queue hotplug re-enabling. */ 257 if (hpd_disabled) { 258 drm_kms_helper_poll_reschedule(&dev_priv->drm); 259 mod_delayed_detection_work(dev_priv, 260 &dev_priv->display.hotplug.reenable_work, 261 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); 262 } 263 } 264 265 static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) 266 { 267 struct drm_i915_private *dev_priv = 268 container_of(work, typeof(*dev_priv), 269 display.hotplug.reenable_work.work); 270 struct drm_connector_list_iter conn_iter; 271 struct intel_connector *connector; 272 intel_wakeref_t wakeref; 273 enum hpd_pin pin; 274 275 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 276 277 spin_lock_irq(&dev_priv->irq_lock); 278 279 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 280 for_each_intel_connector_iter(connector, &conn_iter) { 281 pin = intel_connector_hpd_pin(connector); 282 if (pin == HPD_NONE || 283 dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED) 284 continue; 285 286 if (connector->base.polled != connector->polled) 287 drm_dbg(&dev_priv->drm, 288 "Reenabling HPD on connector %s\n", 289 connector->base.name); 290 connector->base.polled = connector->polled; 291 } 292 drm_connector_list_iter_end(&conn_iter); 293 294 for_each_hpd_pin(pin) { 295 if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) 296 dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; 297 } 298 299 intel_hpd_irq_setup(dev_priv); 300 301 spin_unlock_irq(&dev_priv->irq_lock); 302 303 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 304 } 305 306 static enum intel_hotplug_state 307 intel_hotplug_detect_connector(struct intel_connector *connector) 308 { 309 struct drm_device *dev = connector->base.dev; 310 enum drm_connector_status old_status; 311 u64 old_epoch_counter; 312 int status; 313 bool ret = false; 314 315 drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex)); 316 old_status = connector->base.status; 317 old_epoch_counter = connector->base.epoch_counter; 318 319 status = drm_helper_probe_detect(&connector->base, NULL, false); 320 if (!connector->base.force) 321 connector->base.status = status; 322 323 if (old_epoch_counter != connector->base.epoch_counter) 324 ret = true; 325 326 if (ret) { 327 drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n", 328 connector->base.base.id, 329 connector->base.name, 330 drm_get_connector_status_name(old_status), 331 drm_get_connector_status_name(connector->base.status), 332 old_epoch_counter, 333 connector->base.epoch_counter); 334 return INTEL_HOTPLUG_CHANGED; 335 } 336 return INTEL_HOTPLUG_UNCHANGED; 337 } 338 339 enum intel_hotplug_state 340 intel_encoder_hotplug(struct intel_encoder *encoder, 341 struct intel_connector *connector) 342 { 343 return intel_hotplug_detect_connector(connector); 344 } 345 346 static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) 347 { 348 return intel_encoder_is_dig_port(encoder) && 349 enc_to_dig_port(encoder)->hpd_pulse != NULL; 350 } 351 352 static void i915_digport_work_func(struct work_struct *work) 353 { 354 struct drm_i915_private *dev_priv = 355 container_of(work, struct drm_i915_private, display.hotplug.dig_port_work); 356 u32 long_port_mask, short_port_mask; 357 struct intel_encoder *encoder; 358 u32 old_bits = 0; 359 360 spin_lock_irq(&dev_priv->irq_lock); 361 long_port_mask = dev_priv->display.hotplug.long_port_mask; 362 dev_priv->display.hotplug.long_port_mask = 0; 363 short_port_mask = dev_priv->display.hotplug.short_port_mask; 364 dev_priv->display.hotplug.short_port_mask = 0; 365 spin_unlock_irq(&dev_priv->irq_lock); 366 367 for_each_intel_encoder(&dev_priv->drm, encoder) { 368 struct intel_digital_port *dig_port; 369 enum port port = encoder->port; 370 bool long_hpd, short_hpd; 371 enum irqreturn ret; 372 373 if (!intel_encoder_has_hpd_pulse(encoder)) 374 continue; 375 376 long_hpd = long_port_mask & BIT(port); 377 short_hpd = short_port_mask & BIT(port); 378 379 if (!long_hpd && !short_hpd) 380 continue; 381 382 dig_port = enc_to_dig_port(encoder); 383 384 ret = dig_port->hpd_pulse(dig_port, long_hpd); 385 if (ret == IRQ_NONE) { 386 /* fall back to old school hpd */ 387 old_bits |= BIT(encoder->hpd_pin); 388 } 389 } 390 391 if (old_bits) { 392 spin_lock_irq(&dev_priv->irq_lock); 393 dev_priv->display.hotplug.event_bits |= old_bits; 394 queue_delayed_detection_work(dev_priv, 395 &dev_priv->display.hotplug.hotplug_work, 0); 396 spin_unlock_irq(&dev_priv->irq_lock); 397 } 398 } 399 400 /** 401 * intel_hpd_trigger_irq - trigger an hpd irq event for a port 402 * @dig_port: digital port 403 * 404 * Trigger an HPD interrupt event for the given port, emulating a short pulse 405 * generated by the sink, and schedule the dig port work to handle it. 406 */ 407 void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) 408 { 409 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 410 411 spin_lock_irq(&i915->irq_lock); 412 i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port); 413 spin_unlock_irq(&i915->irq_lock); 414 415 queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work); 416 } 417 418 /* 419 * Handle hotplug events outside the interrupt handler proper. 420 */ 421 static void i915_hotplug_work_func(struct work_struct *work) 422 { 423 struct drm_i915_private *dev_priv = 424 container_of(work, struct drm_i915_private, 425 display.hotplug.hotplug_work.work); 426 struct drm_connector_list_iter conn_iter; 427 struct intel_connector *connector; 428 u32 changed = 0, retry = 0; 429 u32 hpd_event_bits; 430 u32 hpd_retry_bits; 431 struct drm_connector *first_changed_connector = NULL; 432 int changed_connectors = 0; 433 434 mutex_lock(&dev_priv->drm.mode_config.mutex); 435 drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n"); 436 437 spin_lock_irq(&dev_priv->irq_lock); 438 439 hpd_event_bits = dev_priv->display.hotplug.event_bits; 440 dev_priv->display.hotplug.event_bits = 0; 441 hpd_retry_bits = dev_priv->display.hotplug.retry_bits; 442 dev_priv->display.hotplug.retry_bits = 0; 443 444 /* Enable polling for connectors which had HPD IRQ storms */ 445 intel_hpd_irq_storm_switch_to_polling(dev_priv); 446 447 spin_unlock_irq(&dev_priv->irq_lock); 448 449 /* Skip calling encode hotplug handlers if ignore long HPD set*/ 450 if (dev_priv->display.hotplug.ignore_long_hpd) { 451 drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); 452 mutex_unlock(&dev_priv->drm.mode_config.mutex); 453 return; 454 } 455 456 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 457 for_each_intel_connector_iter(connector, &conn_iter) { 458 enum hpd_pin pin; 459 u32 hpd_bit; 460 461 pin = intel_connector_hpd_pin(connector); 462 if (pin == HPD_NONE) 463 continue; 464 465 hpd_bit = BIT(pin); 466 if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) { 467 struct intel_encoder *encoder = 468 intel_attached_encoder(connector); 469 470 if (hpd_event_bits & hpd_bit) 471 connector->hotplug_retries = 0; 472 else 473 connector->hotplug_retries++; 474 475 drm_dbg_kms(&dev_priv->drm, 476 "Connector %s (pin %i) received hotplug event. (retry %d)\n", 477 connector->base.name, pin, 478 connector->hotplug_retries); 479 480 switch (encoder->hotplug(encoder, connector)) { 481 case INTEL_HOTPLUG_UNCHANGED: 482 break; 483 case INTEL_HOTPLUG_CHANGED: 484 changed |= hpd_bit; 485 changed_connectors++; 486 if (!first_changed_connector) { 487 drm_connector_get(&connector->base); 488 first_changed_connector = &connector->base; 489 } 490 break; 491 case INTEL_HOTPLUG_RETRY: 492 retry |= hpd_bit; 493 break; 494 } 495 } 496 } 497 drm_connector_list_iter_end(&conn_iter); 498 mutex_unlock(&dev_priv->drm.mode_config.mutex); 499 500 if (changed_connectors == 1) 501 drm_kms_helper_connector_hotplug_event(first_changed_connector); 502 else if (changed_connectors > 0) 503 drm_kms_helper_hotplug_event(&dev_priv->drm); 504 505 if (first_changed_connector) 506 drm_connector_put(first_changed_connector); 507 508 /* Remove shared HPD pins that have changed */ 509 retry &= ~changed; 510 if (retry) { 511 spin_lock_irq(&dev_priv->irq_lock); 512 dev_priv->display.hotplug.retry_bits |= retry; 513 514 mod_delayed_detection_work(dev_priv, 515 &dev_priv->display.hotplug.hotplug_work, 516 msecs_to_jiffies(HPD_RETRY_DELAY)); 517 spin_unlock_irq(&dev_priv->irq_lock); 518 } 519 } 520 521 522 /** 523 * intel_hpd_irq_handler - main hotplug irq handler 524 * @dev_priv: drm_i915_private 525 * @pin_mask: a mask of hpd pins that have triggered the irq 526 * @long_mask: a mask of hpd pins that may be long hpd pulses 527 * 528 * This is the main hotplug irq handler for all platforms. The platform specific 529 * irq handlers call the platform specific hotplug irq handlers, which read and 530 * decode the appropriate registers into bitmasks about hpd pins that have 531 * triggered (@pin_mask), and which of those pins may be long pulses 532 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin 533 * is not a digital port. 534 * 535 * Here, we do hotplug irq storm detection and mitigation, and pass further 536 * processing to appropriate bottom halves. 537 */ 538 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, 539 u32 pin_mask, u32 long_mask) 540 { 541 struct intel_encoder *encoder; 542 bool storm_detected = false; 543 bool queue_dig = false, queue_hp = false; 544 u32 long_hpd_pulse_mask = 0; 545 u32 short_hpd_pulse_mask = 0; 546 enum hpd_pin pin; 547 548 if (!pin_mask) 549 return; 550 551 spin_lock(&dev_priv->irq_lock); 552 553 /* 554 * Determine whether ->hpd_pulse() exists for each pin, and 555 * whether we have a short or a long pulse. This is needed 556 * as each pin may have up to two encoders (HDMI and DP) and 557 * only the one of them (DP) will have ->hpd_pulse(). 558 */ 559 for_each_intel_encoder(&dev_priv->drm, encoder) { 560 enum port port = encoder->port; 561 bool long_hpd; 562 563 pin = encoder->hpd_pin; 564 if (!(BIT(pin) & pin_mask)) 565 continue; 566 567 if (!intel_encoder_has_hpd_pulse(encoder)) 568 continue; 569 570 long_hpd = long_mask & BIT(pin); 571 572 drm_dbg(&dev_priv->drm, 573 "digital hpd on [ENCODER:%d:%s] - %s\n", 574 encoder->base.base.id, encoder->base.name, 575 long_hpd ? "long" : "short"); 576 queue_dig = true; 577 578 if (long_hpd) { 579 long_hpd_pulse_mask |= BIT(pin); 580 dev_priv->display.hotplug.long_port_mask |= BIT(port); 581 } else { 582 short_hpd_pulse_mask |= BIT(pin); 583 dev_priv->display.hotplug.short_port_mask |= BIT(port); 584 } 585 } 586 587 /* Now process each pin just once */ 588 for_each_hpd_pin(pin) { 589 bool long_hpd; 590 591 if (!(BIT(pin) & pin_mask)) 592 continue; 593 594 if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) { 595 /* 596 * On GMCH platforms the interrupt mask bits only 597 * prevent irq generation, not the setting of the 598 * hotplug bits itself. So only WARN about unexpected 599 * interrupts on saner platforms. 600 */ 601 drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv), 602 "Received HPD interrupt on pin %d although disabled\n", 603 pin); 604 continue; 605 } 606 607 if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED) 608 continue; 609 610 /* 611 * Delegate to ->hpd_pulse() if one of the encoders for this 612 * pin has it, otherwise let the hotplug_work deal with this 613 * pin directly. 614 */ 615 if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { 616 long_hpd = long_hpd_pulse_mask & BIT(pin); 617 } else { 618 dev_priv->display.hotplug.event_bits |= BIT(pin); 619 long_hpd = true; 620 queue_hp = true; 621 } 622 623 if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) { 624 dev_priv->display.hotplug.event_bits &= ~BIT(pin); 625 storm_detected = true; 626 queue_hp = true; 627 } 628 } 629 630 /* 631 * Disable any IRQs that storms were detected on. Polling enablement 632 * happens later in our hotplug work. 633 */ 634 if (storm_detected) 635 intel_hpd_irq_setup(dev_priv); 636 637 /* 638 * Our hotplug handler can grab modeset locks (by calling down into the 639 * fb helpers). Hence it must not be run on our own dev-priv->wq work 640 * queue for otherwise the flush_work in the pageflip code will 641 * deadlock. 642 */ 643 if (queue_dig) 644 queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work); 645 if (queue_hp) 646 queue_delayed_detection_work(dev_priv, 647 &dev_priv->display.hotplug.hotplug_work, 0); 648 649 spin_unlock(&dev_priv->irq_lock); 650 } 651 652 /** 653 * intel_hpd_init - initializes and enables hpd support 654 * @dev_priv: i915 device instance 655 * 656 * This function enables the hotplug support. It requires that interrupts have 657 * already been enabled with intel_irq_init_hw(). From this point on hotplug and 658 * poll request can run concurrently to other code, so locking rules must be 659 * obeyed. 660 * 661 * This is a separate step from interrupt enabling to simplify the locking rules 662 * in the driver load and resume code. 663 * 664 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable(). 665 */ 666 void intel_hpd_init(struct drm_i915_private *dev_priv) 667 { 668 int i; 669 670 if (!HAS_DISPLAY(dev_priv)) 671 return; 672 673 for_each_hpd_pin(i) { 674 dev_priv->display.hotplug.stats[i].count = 0; 675 dev_priv->display.hotplug.stats[i].state = HPD_ENABLED; 676 } 677 678 /* 679 * Interrupt setup is already guaranteed to be single-threaded, this is 680 * just to make the assert_spin_locked checks happy. 681 */ 682 spin_lock_irq(&dev_priv->irq_lock); 683 intel_hpd_irq_setup(dev_priv); 684 spin_unlock_irq(&dev_priv->irq_lock); 685 } 686 687 static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915) 688 { 689 struct drm_connector_list_iter conn_iter; 690 struct intel_connector *connector; 691 struct intel_connector *first_changed_connector = NULL; 692 int changed = 0; 693 694 mutex_lock(&i915->drm.mode_config.mutex); 695 696 if (!i915->drm.mode_config.poll_enabled) 697 goto out; 698 699 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 700 for_each_intel_connector_iter(connector, &conn_iter) { 701 if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD)) 702 continue; 703 704 if (intel_hotplug_detect_connector(connector) != INTEL_HOTPLUG_CHANGED) 705 continue; 706 707 changed++; 708 709 if (changed == 1) { 710 drm_connector_get(&connector->base); 711 first_changed_connector = connector; 712 } 713 } 714 drm_connector_list_iter_end(&conn_iter); 715 716 out: 717 mutex_unlock(&i915->drm.mode_config.mutex); 718 719 if (!changed) 720 return; 721 722 if (changed == 1) 723 drm_kms_helper_connector_hotplug_event(&first_changed_connector->base); 724 else 725 drm_kms_helper_hotplug_event(&i915->drm); 726 727 drm_connector_put(&first_changed_connector->base); 728 } 729 730 static void i915_hpd_poll_init_work(struct work_struct *work) 731 { 732 struct drm_i915_private *dev_priv = 733 container_of(work, struct drm_i915_private, 734 display.hotplug.poll_init_work); 735 struct drm_connector_list_iter conn_iter; 736 struct intel_connector *connector; 737 intel_wakeref_t wakeref; 738 bool enabled; 739 740 mutex_lock(&dev_priv->drm.mode_config.mutex); 741 742 enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled); 743 /* 744 * Prevent taking a power reference from this sequence of 745 * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() -> 746 * connector detect which would requeue i915_hpd_poll_init_work() 747 * and so risk an endless loop of this same sequence. 748 */ 749 if (!enabled) { 750 wakeref = intel_display_power_get(dev_priv, 751 POWER_DOMAIN_DISPLAY_CORE); 752 drm_WARN_ON(&dev_priv->drm, 753 READ_ONCE(dev_priv->display.hotplug.poll_enabled)); 754 cancel_work(&dev_priv->display.hotplug.poll_init_work); 755 } 756 757 spin_lock_irq(&dev_priv->irq_lock); 758 759 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 760 for_each_intel_connector_iter(connector, &conn_iter) { 761 enum hpd_pin pin; 762 763 pin = intel_connector_hpd_pin(connector); 764 if (pin == HPD_NONE) 765 continue; 766 767 if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) 768 continue; 769 770 connector->base.polled = connector->polled; 771 772 if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD) 773 connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | 774 DRM_CONNECTOR_POLL_DISCONNECT; 775 } 776 drm_connector_list_iter_end(&conn_iter); 777 778 spin_unlock_irq(&dev_priv->irq_lock); 779 780 if (enabled) 781 drm_kms_helper_poll_reschedule(&dev_priv->drm); 782 783 mutex_unlock(&dev_priv->drm.mode_config.mutex); 784 785 /* 786 * We might have missed any hotplugs that happened while we were 787 * in the middle of disabling polling 788 */ 789 if (!enabled) { 790 i915_hpd_poll_detect_connectors(dev_priv); 791 792 intel_display_power_put(dev_priv, 793 POWER_DOMAIN_DISPLAY_CORE, 794 wakeref); 795 } 796 } 797 798 /** 799 * intel_hpd_poll_enable - enable polling for connectors with hpd 800 * @dev_priv: i915 device instance 801 * 802 * This function enables polling for all connectors which support HPD. 803 * Under certain conditions HPD may not be functional. On most Intel GPUs, 804 * this happens when we enter runtime suspend. 805 * On Valleyview and Cherryview systems, this also happens when we shut off all 806 * of the powerwells. 807 * 808 * Since this function can get called in contexts where we're already holding 809 * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate 810 * worker. 811 * 812 * Also see: intel_hpd_init() and intel_hpd_poll_disable(). 813 */ 814 void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) 815 { 816 if (!HAS_DISPLAY(dev_priv) || 817 !intel_display_device_enabled(dev_priv)) 818 return; 819 820 WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true); 821 822 /* 823 * We might already be holding dev->mode_config.mutex, so do this in a 824 * seperate worker 825 * As well, there's no issue if we race here since we always reschedule 826 * this worker anyway 827 */ 828 spin_lock_irq(&dev_priv->irq_lock); 829 queue_detection_work(dev_priv, 830 &dev_priv->display.hotplug.poll_init_work); 831 spin_unlock_irq(&dev_priv->irq_lock); 832 } 833 834 /** 835 * intel_hpd_poll_disable - disable polling for connectors with hpd 836 * @dev_priv: i915 device instance 837 * 838 * This function disables polling for all connectors which support HPD. 839 * Under certain conditions HPD may not be functional. On most Intel GPUs, 840 * this happens when we enter runtime suspend. 841 * On Valleyview and Cherryview systems, this also happens when we shut off all 842 * of the powerwells. 843 * 844 * Since this function can get called in contexts where we're already holding 845 * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate 846 * worker. 847 * 848 * Also used during driver init to initialize connector->polled 849 * appropriately for all connectors. 850 * 851 * Also see: intel_hpd_init() and intel_hpd_poll_enable(). 852 */ 853 void intel_hpd_poll_disable(struct drm_i915_private *dev_priv) 854 { 855 if (!HAS_DISPLAY(dev_priv)) 856 return; 857 858 WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false); 859 860 spin_lock_irq(&dev_priv->irq_lock); 861 queue_detection_work(dev_priv, 862 &dev_priv->display.hotplug.poll_init_work); 863 spin_unlock_irq(&dev_priv->irq_lock); 864 } 865 866 void intel_hpd_init_early(struct drm_i915_private *i915) 867 { 868 INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work, 869 i915_hotplug_work_func); 870 INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func); 871 INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work); 872 INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work, 873 intel_hpd_irq_storm_reenable_work); 874 875 i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 876 /* If we have MST support, we want to avoid doing short HPD IRQ storm 877 * detection, as short HPD storms will occur as a natural part of 878 * sideband messaging with MST. 879 * On older platforms however, IRQ storms can occur with both long and 880 * short pulses, as seen on some G4x systems. 881 */ 882 i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915); 883 } 884 885 static bool cancel_all_detection_work(struct drm_i915_private *i915) 886 { 887 bool was_pending = false; 888 889 if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work)) 890 was_pending = true; 891 if (cancel_work_sync(&i915->display.hotplug.poll_init_work)) 892 was_pending = true; 893 if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work)) 894 was_pending = true; 895 896 return was_pending; 897 } 898 899 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) 900 { 901 if (!HAS_DISPLAY(dev_priv)) 902 return; 903 904 spin_lock_irq(&dev_priv->irq_lock); 905 906 dev_priv->display.hotplug.long_port_mask = 0; 907 dev_priv->display.hotplug.short_port_mask = 0; 908 dev_priv->display.hotplug.event_bits = 0; 909 dev_priv->display.hotplug.retry_bits = 0; 910 911 spin_unlock_irq(&dev_priv->irq_lock); 912 913 cancel_work_sync(&dev_priv->display.hotplug.dig_port_work); 914 915 /* 916 * All other work triggered by hotplug events should be canceled by 917 * now. 918 */ 919 if (cancel_all_detection_work(dev_priv)) 920 drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n"); 921 } 922 923 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) 924 { 925 bool ret = false; 926 927 if (pin == HPD_NONE) 928 return false; 929 930 spin_lock_irq(&dev_priv->irq_lock); 931 if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) { 932 dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; 933 ret = true; 934 } 935 spin_unlock_irq(&dev_priv->irq_lock); 936 937 return ret; 938 } 939 940 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) 941 { 942 if (pin == HPD_NONE) 943 return; 944 945 spin_lock_irq(&dev_priv->irq_lock); 946 dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; 947 spin_unlock_irq(&dev_priv->irq_lock); 948 } 949 950 static void queue_work_for_missed_irqs(struct drm_i915_private *i915) 951 { 952 bool queue_work = false; 953 enum hpd_pin pin; 954 955 lockdep_assert_held(&i915->irq_lock); 956 957 if (i915->display.hotplug.event_bits || 958 i915->display.hotplug.retry_bits) 959 queue_work = true; 960 961 for_each_hpd_pin(pin) { 962 switch (i915->display.hotplug.stats[pin].state) { 963 case HPD_MARK_DISABLED: 964 queue_work = true; 965 break; 966 case HPD_ENABLED: 967 break; 968 default: 969 MISSING_CASE(i915->display.hotplug.stats[pin].state); 970 } 971 } 972 973 if (queue_work) 974 queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); 975 } 976 977 void intel_hpd_enable_detection_work(struct drm_i915_private *i915) 978 { 979 spin_lock_irq(&i915->irq_lock); 980 i915->display.hotplug.detection_work_enabled = true; 981 queue_work_for_missed_irqs(i915); 982 spin_unlock_irq(&i915->irq_lock); 983 } 984 985 void intel_hpd_disable_detection_work(struct drm_i915_private *i915) 986 { 987 spin_lock_irq(&i915->irq_lock); 988 i915->display.hotplug.detection_work_enabled = false; 989 spin_unlock_irq(&i915->irq_lock); 990 991 cancel_all_detection_work(i915); 992 } 993 994 bool intel_hpd_schedule_detection(struct drm_i915_private *i915) 995 { 996 unsigned long flags; 997 bool ret; 998 999 spin_lock_irqsave(&i915->irq_lock, flags); 1000 ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); 1001 spin_unlock_irqrestore(&i915->irq_lock, flags); 1002 1003 return ret; 1004 } 1005 1006 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) 1007 { 1008 struct drm_i915_private *dev_priv = m->private; 1009 struct intel_hotplug *hotplug = &dev_priv->display.hotplug; 1010 1011 /* Synchronize with everything first in case there's been an HPD 1012 * storm, but we haven't finished handling it in the kernel yet 1013 */ 1014 intel_synchronize_irq(dev_priv); 1015 flush_work(&dev_priv->display.hotplug.dig_port_work); 1016 flush_delayed_work(&dev_priv->display.hotplug.hotplug_work); 1017 1018 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); 1019 seq_printf(m, "Detected: %s\n", 1020 str_yes_no(delayed_work_pending(&hotplug->reenable_work))); 1021 1022 return 0; 1023 } 1024 1025 static ssize_t i915_hpd_storm_ctl_write(struct file *file, 1026 const char __user *ubuf, size_t len, 1027 loff_t *offp) 1028 { 1029 struct seq_file *m = file->private_data; 1030 struct drm_i915_private *dev_priv = m->private; 1031 struct intel_hotplug *hotplug = &dev_priv->display.hotplug; 1032 unsigned int new_threshold; 1033 int i; 1034 char *newline; 1035 char tmp[16]; 1036 1037 if (len >= sizeof(tmp)) 1038 return -EINVAL; 1039 1040 if (copy_from_user(tmp, ubuf, len)) 1041 return -EFAULT; 1042 1043 tmp[len] = '\0'; 1044 1045 /* Strip newline, if any */ 1046 newline = strchr(tmp, '\n'); 1047 if (newline) 1048 *newline = '\0'; 1049 1050 if (strcmp(tmp, "reset") == 0) 1051 new_threshold = HPD_STORM_DEFAULT_THRESHOLD; 1052 else if (kstrtouint(tmp, 10, &new_threshold) != 0) 1053 return -EINVAL; 1054 1055 if (new_threshold > 0) 1056 drm_dbg_kms(&dev_priv->drm, 1057 "Setting HPD storm detection threshold to %d\n", 1058 new_threshold); 1059 else 1060 drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n"); 1061 1062 spin_lock_irq(&dev_priv->irq_lock); 1063 hotplug->hpd_storm_threshold = new_threshold; 1064 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 1065 for_each_hpd_pin(i) 1066 hotplug->stats[i].count = 0; 1067 spin_unlock_irq(&dev_priv->irq_lock); 1068 1069 /* Re-enable hpd immediately if we were in an irq storm */ 1070 flush_delayed_work(&dev_priv->display.hotplug.reenable_work); 1071 1072 return len; 1073 } 1074 1075 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) 1076 { 1077 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); 1078 } 1079 1080 static const struct file_operations i915_hpd_storm_ctl_fops = { 1081 .owner = THIS_MODULE, 1082 .open = i915_hpd_storm_ctl_open, 1083 .read = seq_read, 1084 .llseek = seq_lseek, 1085 .release = single_release, 1086 .write = i915_hpd_storm_ctl_write 1087 }; 1088 1089 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) 1090 { 1091 struct drm_i915_private *dev_priv = m->private; 1092 1093 seq_printf(m, "Enabled: %s\n", 1094 str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled)); 1095 1096 return 0; 1097 } 1098 1099 static int 1100 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file) 1101 { 1102 return single_open(file, i915_hpd_short_storm_ctl_show, 1103 inode->i_private); 1104 } 1105 1106 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, 1107 const char __user *ubuf, 1108 size_t len, loff_t *offp) 1109 { 1110 struct seq_file *m = file->private_data; 1111 struct drm_i915_private *dev_priv = m->private; 1112 struct intel_hotplug *hotplug = &dev_priv->display.hotplug; 1113 char *newline; 1114 char tmp[16]; 1115 int i; 1116 bool new_state; 1117 1118 if (len >= sizeof(tmp)) 1119 return -EINVAL; 1120 1121 if (copy_from_user(tmp, ubuf, len)) 1122 return -EFAULT; 1123 1124 tmp[len] = '\0'; 1125 1126 /* Strip newline, if any */ 1127 newline = strchr(tmp, '\n'); 1128 if (newline) 1129 *newline = '\0'; 1130 1131 /* Reset to the "default" state for this system */ 1132 if (strcmp(tmp, "reset") == 0) 1133 new_state = !HAS_DP_MST(dev_priv); 1134 else if (kstrtobool(tmp, &new_state) != 0) 1135 return -EINVAL; 1136 1137 drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n", 1138 new_state ? "En" : "Dis"); 1139 1140 spin_lock_irq(&dev_priv->irq_lock); 1141 hotplug->hpd_short_storm_enabled = new_state; 1142 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 1143 for_each_hpd_pin(i) 1144 hotplug->stats[i].count = 0; 1145 spin_unlock_irq(&dev_priv->irq_lock); 1146 1147 /* Re-enable hpd immediately if we were in an irq storm */ 1148 flush_delayed_work(&dev_priv->display.hotplug.reenable_work); 1149 1150 return len; 1151 } 1152 1153 static const struct file_operations i915_hpd_short_storm_ctl_fops = { 1154 .owner = THIS_MODULE, 1155 .open = i915_hpd_short_storm_ctl_open, 1156 .read = seq_read, 1157 .llseek = seq_lseek, 1158 .release = single_release, 1159 .write = i915_hpd_short_storm_ctl_write, 1160 }; 1161 1162 void intel_hpd_debugfs_register(struct drm_i915_private *i915) 1163 { 1164 struct drm_minor *minor = i915->drm.primary; 1165 1166 debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root, 1167 i915, &i915_hpd_storm_ctl_fops); 1168 debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, 1169 i915, &i915_hpd_short_storm_ctl_fops); 1170 debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, 1171 &i915->display.hotplug.ignore_long_hpd); 1172 } 1173