1 /* 2 * Copyright © 2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24 #include <linux/debugfs.h> 25 #include <linux/kernel.h> 26 27 #include <drm/drm_probe_helper.h> 28 29 #include "i915_drv.h" 30 #include "i915_irq.h" 31 #include "intel_connector.h" 32 #include "intel_display_power.h" 33 #include "intel_display_types.h" 34 #include "intel_hdcp.h" 35 #include "intel_hotplug.h" 36 #include "intel_hotplug_irq.h" 37 38 /** 39 * DOC: Hotplug 40 * 41 * Simply put, hotplug occurs when a display is connected to or disconnected 42 * from the system. However, there may be adapters and docking stations and 43 * Display Port short pulses and MST devices involved, complicating matters. 44 * 45 * Hotplug in i915 is handled in many different levels of abstraction. 46 * 47 * The platform dependent interrupt handling code in i915_irq.c enables, 48 * disables, and does preliminary handling of the interrupts. The interrupt 49 * handlers gather the hotplug detect (HPD) information from relevant registers 50 * into a platform independent mask of hotplug pins that have fired. 51 * 52 * The platform independent interrupt handler intel_hpd_irq_handler() in 53 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes 54 * further processing to appropriate bottom halves (Display Port specific and 55 * regular hotplug). 56 * 57 * The Display Port work function i915_digport_work_func() calls into 58 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long 59 * pulses, with failures and non-MST long pulses triggering regular hotplug 60 * processing on the connector. 61 * 62 * The regular hotplug work function i915_hotplug_work_func() calls connector 63 * detect hooks, and, if connector status changes, triggers sending of hotplug 64 * uevent to userspace via drm_kms_helper_hotplug_event(). 65 * 66 * Finally, the userspace is responsible for triggering a modeset upon receiving 67 * the hotplug uevent, disabling or enabling the crtc as needed. 68 * 69 * The hotplug interrupt storm detection and mitigation code keeps track of the 70 * number of interrupts per hotplug pin per a period of time, and if the number 71 * of interrupts exceeds a certain threshold, the interrupt is disabled for a 72 * while before being re-enabled. The intention is to mitigate issues raising 73 * from broken hardware triggering massive amounts of interrupts and grinding 74 * the system to a halt. 75 * 76 * Current implementation expects that hotplug interrupt storm will not be 77 * seen when display port sink is connected, hence on platforms whose DP 78 * callback is handled by i915_digport_work_func reenabling of hpd is not 79 * performed (it was never expected to be disabled in the first place ;) ) 80 * this is specific to DP sinks handled by this routine and any other display 81 * such as HDMI or DVI enabled on the same port will have proper logic since 82 * it will use i915_hotplug_work_func where this logic is handled. 83 */ 84 85 /** 86 * intel_hpd_pin_default - return default pin associated with certain port. 87 * @port: the hpd port to get associated pin 88 * 89 * It is only valid and used by digital port encoder. 90 * 91 * Return pin that is associatade with @port. 92 */ 93 enum hpd_pin intel_hpd_pin_default(enum port port) 94 { 95 return HPD_PORT_A + port - PORT_A; 96 } 97 98 /* Threshold == 5 for long IRQs, 50 for short */ 99 #define HPD_STORM_DEFAULT_THRESHOLD 50 100 101 #define HPD_STORM_DETECT_PERIOD 1000 102 #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) 103 #define HPD_RETRY_DELAY 1000 104 105 static enum hpd_pin 106 intel_connector_hpd_pin(struct intel_connector *connector) 107 { 108 struct intel_encoder *encoder = intel_attached_encoder(connector); 109 110 /* 111 * MST connectors get their encoder attached dynamically 112 * so need to make sure we have an encoder here. But since 113 * MST encoders have their hpd_pin set to HPD_NONE we don't 114 * have to special case them beyond that. 115 */ 116 return encoder ? encoder->hpd_pin : HPD_NONE; 117 } 118 119 /** 120 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin 121 * @dev_priv: private driver data pointer 122 * @pin: the pin to gather stats on 123 * @long_hpd: whether the HPD IRQ was long or short 124 * 125 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ 126 * storms. Only the pin specific stats and state are changed, the caller is 127 * responsible for further action. 128 * 129 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is 130 * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to 131 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and 132 * short IRQs count as +1. If this threshold is exceeded, it's considered an 133 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED. 134 * 135 * By default, most systems will only count long IRQs towards 136 * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also 137 * suffer from short IRQ storms and must also track these. Because short IRQ 138 * storms are naturally caused by sideband interactions with DP MST devices, 139 * short IRQ detection is only enabled for systems without DP MST support. 140 * Systems which are new enough to support DP MST are far less likely to 141 * suffer from IRQ storms at all, so this is fine. 142 * 143 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs, 144 * and should only be adjusted for automated hotplug testing. 145 * 146 * Return true if an IRQ storm was detected on @pin. 147 */ 148 static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, 149 enum hpd_pin pin, bool long_hpd) 150 { 151 struct intel_hotplug *hpd = &dev_priv->display.hotplug; 152 unsigned long start = hpd->stats[pin].last_jiffies; 153 unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); 154 const int increment = long_hpd ? 10 : 1; 155 const int threshold = hpd->hpd_storm_threshold; 156 bool storm = false; 157 158 if (!threshold || 159 (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled)) 160 return false; 161 162 if (!time_in_range(jiffies, start, end)) { 163 hpd->stats[pin].last_jiffies = jiffies; 164 hpd->stats[pin].count = 0; 165 } 166 167 hpd->stats[pin].count += increment; 168 if (hpd->stats[pin].count > threshold) { 169 hpd->stats[pin].state = HPD_MARK_DISABLED; 170 drm_dbg_kms(&dev_priv->drm, 171 "HPD interrupt storm detected on PIN %d\n", pin); 172 storm = true; 173 } else { 174 drm_dbg_kms(&dev_priv->drm, 175 "Received HPD interrupt on PIN %d - cnt: %d\n", 176 pin, 177 hpd->stats[pin].count); 178 } 179 180 return storm; 181 } 182 183 static bool detection_work_enabled(struct drm_i915_private *i915) 184 { 185 lockdep_assert_held(&i915->irq_lock); 186 187 return i915->display.hotplug.detection_work_enabled; 188 } 189 190 static bool 191 mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay) 192 { 193 lockdep_assert_held(&i915->irq_lock); 194 195 if (!detection_work_enabled(i915)) 196 return false; 197 198 return mod_delayed_work(i915->unordered_wq, work, delay); 199 } 200 201 static bool 202 queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay) 203 { 204 lockdep_assert_held(&i915->irq_lock); 205 206 if (!detection_work_enabled(i915)) 207 return false; 208 209 return queue_delayed_work(i915->unordered_wq, work, delay); 210 } 211 212 static bool 213 queue_detection_work(struct drm_i915_private *i915, struct work_struct *work) 214 { 215 lockdep_assert_held(&i915->irq_lock); 216 217 if (!detection_work_enabled(i915)) 218 return false; 219 220 return queue_work(i915->unordered_wq, work); 221 } 222 223 static void 224 intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) 225 { 226 struct drm_connector_list_iter conn_iter; 227 struct intel_connector *connector; 228 bool hpd_disabled = false; 229 230 lockdep_assert_held(&dev_priv->irq_lock); 231 232 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 233 for_each_intel_connector_iter(connector, &conn_iter) { 234 enum hpd_pin pin; 235 236 if (connector->base.polled != DRM_CONNECTOR_POLL_HPD) 237 continue; 238 239 pin = intel_connector_hpd_pin(connector); 240 if (pin == HPD_NONE || 241 dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED) 242 continue; 243 244 drm_info(&dev_priv->drm, 245 "HPD interrupt storm detected on connector %s: " 246 "switching from hotplug detection to polling\n", 247 connector->base.name); 248 249 dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; 250 connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | 251 DRM_CONNECTOR_POLL_DISCONNECT; 252 hpd_disabled = true; 253 } 254 drm_connector_list_iter_end(&conn_iter); 255 256 /* Enable polling and queue hotplug re-enabling. */ 257 if (hpd_disabled) { 258 drm_kms_helper_poll_reschedule(&dev_priv->drm); 259 mod_delayed_detection_work(dev_priv, 260 &dev_priv->display.hotplug.reenable_work, 261 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); 262 } 263 } 264 265 static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) 266 { 267 struct drm_i915_private *dev_priv = 268 container_of(work, typeof(*dev_priv), 269 display.hotplug.reenable_work.work); 270 struct drm_connector_list_iter conn_iter; 271 struct intel_connector *connector; 272 intel_wakeref_t wakeref; 273 enum hpd_pin pin; 274 275 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 276 277 spin_lock_irq(&dev_priv->irq_lock); 278 279 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 280 for_each_intel_connector_iter(connector, &conn_iter) { 281 pin = intel_connector_hpd_pin(connector); 282 if (pin == HPD_NONE || 283 dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED) 284 continue; 285 286 if (connector->base.polled != connector->polled) 287 drm_dbg(&dev_priv->drm, 288 "Reenabling HPD on connector %s\n", 289 connector->base.name); 290 connector->base.polled = connector->polled; 291 } 292 drm_connector_list_iter_end(&conn_iter); 293 294 for_each_hpd_pin(pin) { 295 if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) 296 dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; 297 } 298 299 intel_hpd_irq_setup(dev_priv); 300 301 spin_unlock_irq(&dev_priv->irq_lock); 302 303 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 304 } 305 306 static enum intel_hotplug_state 307 intel_hotplug_detect_connector(struct intel_connector *connector) 308 { 309 struct drm_device *dev = connector->base.dev; 310 enum drm_connector_status old_status; 311 u64 old_epoch_counter; 312 int status; 313 bool ret = false; 314 315 drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex)); 316 old_status = connector->base.status; 317 old_epoch_counter = connector->base.epoch_counter; 318 319 status = drm_helper_probe_detect(&connector->base, NULL, false); 320 if (!connector->base.force) 321 connector->base.status = status; 322 323 if (old_epoch_counter != connector->base.epoch_counter) 324 ret = true; 325 326 if (ret) { 327 drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n", 328 connector->base.base.id, 329 connector->base.name, 330 drm_get_connector_status_name(old_status), 331 drm_get_connector_status_name(connector->base.status), 332 old_epoch_counter, 333 connector->base.epoch_counter); 334 return INTEL_HOTPLUG_CHANGED; 335 } 336 return INTEL_HOTPLUG_UNCHANGED; 337 } 338 339 enum intel_hotplug_state 340 intel_encoder_hotplug(struct intel_encoder *encoder, 341 struct intel_connector *connector) 342 { 343 return intel_hotplug_detect_connector(connector); 344 } 345 346 static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) 347 { 348 return intel_encoder_is_dig_port(encoder) && 349 enc_to_dig_port(encoder)->hpd_pulse != NULL; 350 } 351 352 static void i915_digport_work_func(struct work_struct *work) 353 { 354 struct drm_i915_private *dev_priv = 355 container_of(work, struct drm_i915_private, display.hotplug.dig_port_work); 356 u32 long_port_mask, short_port_mask; 357 struct intel_encoder *encoder; 358 u32 old_bits = 0; 359 360 spin_lock_irq(&dev_priv->irq_lock); 361 long_port_mask = dev_priv->display.hotplug.long_port_mask; 362 dev_priv->display.hotplug.long_port_mask = 0; 363 short_port_mask = dev_priv->display.hotplug.short_port_mask; 364 dev_priv->display.hotplug.short_port_mask = 0; 365 spin_unlock_irq(&dev_priv->irq_lock); 366 367 for_each_intel_encoder(&dev_priv->drm, encoder) { 368 struct intel_digital_port *dig_port; 369 enum port port = encoder->port; 370 bool long_hpd, short_hpd; 371 enum irqreturn ret; 372 373 if (!intel_encoder_has_hpd_pulse(encoder)) 374 continue; 375 376 long_hpd = long_port_mask & BIT(port); 377 short_hpd = short_port_mask & BIT(port); 378 379 if (!long_hpd && !short_hpd) 380 continue; 381 382 dig_port = enc_to_dig_port(encoder); 383 384 ret = dig_port->hpd_pulse(dig_port, long_hpd); 385 if (ret == IRQ_NONE) { 386 /* fall back to old school hpd */ 387 old_bits |= BIT(encoder->hpd_pin); 388 } 389 } 390 391 if (old_bits) { 392 spin_lock_irq(&dev_priv->irq_lock); 393 dev_priv->display.hotplug.event_bits |= old_bits; 394 queue_delayed_detection_work(dev_priv, 395 &dev_priv->display.hotplug.hotplug_work, 0); 396 spin_unlock_irq(&dev_priv->irq_lock); 397 } 398 } 399 400 /** 401 * intel_hpd_trigger_irq - trigger an hpd irq event for a port 402 * @dig_port: digital port 403 * 404 * Trigger an HPD interrupt event for the given port, emulating a short pulse 405 * generated by the sink, and schedule the dig port work to handle it. 406 */ 407 void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) 408 { 409 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 410 411 spin_lock_irq(&i915->irq_lock); 412 i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port); 413 spin_unlock_irq(&i915->irq_lock); 414 415 queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work); 416 } 417 418 /* 419 * Handle hotplug events outside the interrupt handler proper. 420 */ 421 static void i915_hotplug_work_func(struct work_struct *work) 422 { 423 struct drm_i915_private *dev_priv = 424 container_of(work, struct drm_i915_private, 425 display.hotplug.hotplug_work.work); 426 struct drm_connector_list_iter conn_iter; 427 struct intel_connector *connector; 428 u32 changed = 0, retry = 0; 429 u32 hpd_event_bits; 430 u32 hpd_retry_bits; 431 struct drm_connector *first_changed_connector = NULL; 432 int changed_connectors = 0; 433 434 mutex_lock(&dev_priv->drm.mode_config.mutex); 435 drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n"); 436 437 spin_lock_irq(&dev_priv->irq_lock); 438 439 hpd_event_bits = dev_priv->display.hotplug.event_bits; 440 dev_priv->display.hotplug.event_bits = 0; 441 hpd_retry_bits = dev_priv->display.hotplug.retry_bits; 442 dev_priv->display.hotplug.retry_bits = 0; 443 444 /* Enable polling for connectors which had HPD IRQ storms */ 445 intel_hpd_irq_storm_switch_to_polling(dev_priv); 446 447 spin_unlock_irq(&dev_priv->irq_lock); 448 449 /* Skip calling encode hotplug handlers if ignore long HPD set*/ 450 if (dev_priv->display.hotplug.ignore_long_hpd) { 451 drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); 452 mutex_unlock(&dev_priv->drm.mode_config.mutex); 453 return; 454 } 455 456 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 457 for_each_intel_connector_iter(connector, &conn_iter) { 458 enum hpd_pin pin; 459 u32 hpd_bit; 460 461 pin = intel_connector_hpd_pin(connector); 462 if (pin == HPD_NONE) 463 continue; 464 465 hpd_bit = BIT(pin); 466 if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) { 467 struct intel_encoder *encoder = 468 intel_attached_encoder(connector); 469 470 if (hpd_event_bits & hpd_bit) 471 connector->hotplug_retries = 0; 472 else 473 connector->hotplug_retries++; 474 475 drm_dbg_kms(&dev_priv->drm, 476 "Connector %s (pin %i) received hotplug event. (retry %d)\n", 477 connector->base.name, pin, 478 connector->hotplug_retries); 479 480 switch (encoder->hotplug(encoder, connector)) { 481 case INTEL_HOTPLUG_UNCHANGED: 482 break; 483 case INTEL_HOTPLUG_CHANGED: 484 changed |= hpd_bit; 485 changed_connectors++; 486 if (!first_changed_connector) { 487 drm_connector_get(&connector->base); 488 first_changed_connector = &connector->base; 489 } 490 break; 491 case INTEL_HOTPLUG_RETRY: 492 retry |= hpd_bit; 493 break; 494 } 495 } 496 } 497 drm_connector_list_iter_end(&conn_iter); 498 mutex_unlock(&dev_priv->drm.mode_config.mutex); 499 500 if (changed_connectors == 1) 501 drm_kms_helper_connector_hotplug_event(first_changed_connector); 502 else if (changed_connectors > 0) 503 drm_kms_helper_hotplug_event(&dev_priv->drm); 504 505 if (first_changed_connector) 506 drm_connector_put(first_changed_connector); 507 508 /* Remove shared HPD pins that have changed */ 509 retry &= ~changed; 510 if (retry) { 511 spin_lock_irq(&dev_priv->irq_lock); 512 dev_priv->display.hotplug.retry_bits |= retry; 513 514 mod_delayed_detection_work(dev_priv, 515 &dev_priv->display.hotplug.hotplug_work, 516 msecs_to_jiffies(HPD_RETRY_DELAY)); 517 spin_unlock_irq(&dev_priv->irq_lock); 518 } 519 } 520 521 522 /** 523 * intel_hpd_irq_handler - main hotplug irq handler 524 * @dev_priv: drm_i915_private 525 * @pin_mask: a mask of hpd pins that have triggered the irq 526 * @long_mask: a mask of hpd pins that may be long hpd pulses 527 * 528 * This is the main hotplug irq handler for all platforms. The platform specific 529 * irq handlers call the platform specific hotplug irq handlers, which read and 530 * decode the appropriate registers into bitmasks about hpd pins that have 531 * triggered (@pin_mask), and which of those pins may be long pulses 532 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin 533 * is not a digital port. 534 * 535 * Here, we do hotplug irq storm detection and mitigation, and pass further 536 * processing to appropriate bottom halves. 537 */ 538 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, 539 u32 pin_mask, u32 long_mask) 540 { 541 struct intel_encoder *encoder; 542 bool storm_detected = false; 543 bool queue_dig = false, queue_hp = false; 544 u32 long_hpd_pulse_mask = 0; 545 u32 short_hpd_pulse_mask = 0; 546 enum hpd_pin pin; 547 548 if (!pin_mask) 549 return; 550 551 spin_lock(&dev_priv->irq_lock); 552 553 /* 554 * Determine whether ->hpd_pulse() exists for each pin, and 555 * whether we have a short or a long pulse. This is needed 556 * as each pin may have up to two encoders (HDMI and DP) and 557 * only the one of them (DP) will have ->hpd_pulse(). 558 */ 559 for_each_intel_encoder(&dev_priv->drm, encoder) { 560 enum port port = encoder->port; 561 bool long_hpd; 562 563 pin = encoder->hpd_pin; 564 if (!(BIT(pin) & pin_mask)) 565 continue; 566 567 if (!intel_encoder_has_hpd_pulse(encoder)) 568 continue; 569 570 long_hpd = long_mask & BIT(pin); 571 572 drm_dbg(&dev_priv->drm, 573 "digital hpd on [ENCODER:%d:%s] - %s\n", 574 encoder->base.base.id, encoder->base.name, 575 long_hpd ? "long" : "short"); 576 queue_dig = true; 577 578 if (long_hpd) { 579 long_hpd_pulse_mask |= BIT(pin); 580 dev_priv->display.hotplug.long_port_mask |= BIT(port); 581 } else { 582 short_hpd_pulse_mask |= BIT(pin); 583 dev_priv->display.hotplug.short_port_mask |= BIT(port); 584 } 585 } 586 587 /* Now process each pin just once */ 588 for_each_hpd_pin(pin) { 589 bool long_hpd; 590 591 if (!(BIT(pin) & pin_mask)) 592 continue; 593 594 if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) { 595 /* 596 * On GMCH platforms the interrupt mask bits only 597 * prevent irq generation, not the setting of the 598 * hotplug bits itself. So only WARN about unexpected 599 * interrupts on saner platforms. 600 */ 601 drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv), 602 "Received HPD interrupt on pin %d although disabled\n", 603 pin); 604 continue; 605 } 606 607 if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED) 608 continue; 609 610 /* 611 * Delegate to ->hpd_pulse() if one of the encoders for this 612 * pin has it, otherwise let the hotplug_work deal with this 613 * pin directly. 614 */ 615 if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { 616 long_hpd = long_hpd_pulse_mask & BIT(pin); 617 } else { 618 dev_priv->display.hotplug.event_bits |= BIT(pin); 619 long_hpd = true; 620 queue_hp = true; 621 } 622 623 if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) { 624 dev_priv->display.hotplug.event_bits &= ~BIT(pin); 625 storm_detected = true; 626 queue_hp = true; 627 } 628 } 629 630 /* 631 * Disable any IRQs that storms were detected on. Polling enablement 632 * happens later in our hotplug work. 633 */ 634 if (storm_detected) 635 intel_hpd_irq_setup(dev_priv); 636 637 /* 638 * Our hotplug handler can grab modeset locks (by calling down into the 639 * fb helpers). Hence it must not be run on our own dev-priv->wq work 640 * queue for otherwise the flush_work in the pageflip code will 641 * deadlock. 642 */ 643 if (queue_dig) 644 queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work); 645 if (queue_hp) 646 queue_delayed_detection_work(dev_priv, 647 &dev_priv->display.hotplug.hotplug_work, 0); 648 649 spin_unlock(&dev_priv->irq_lock); 650 } 651 652 /** 653 * intel_hpd_init - initializes and enables hpd support 654 * @dev_priv: i915 device instance 655 * 656 * This function enables the hotplug support. It requires that interrupts have 657 * already been enabled with intel_irq_init_hw(). From this point on hotplug and 658 * poll request can run concurrently to other code, so locking rules must be 659 * obeyed. 660 * 661 * This is a separate step from interrupt enabling to simplify the locking rules 662 * in the driver load and resume code. 663 * 664 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable(). 665 */ 666 void intel_hpd_init(struct drm_i915_private *dev_priv) 667 { 668 int i; 669 670 if (!HAS_DISPLAY(dev_priv)) 671 return; 672 673 for_each_hpd_pin(i) { 674 dev_priv->display.hotplug.stats[i].count = 0; 675 dev_priv->display.hotplug.stats[i].state = HPD_ENABLED; 676 } 677 678 /* 679 * Interrupt setup is already guaranteed to be single-threaded, this is 680 * just to make the assert_spin_locked checks happy. 681 */ 682 spin_lock_irq(&dev_priv->irq_lock); 683 intel_hpd_irq_setup(dev_priv); 684 spin_unlock_irq(&dev_priv->irq_lock); 685 } 686 687 static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915) 688 { 689 struct drm_connector_list_iter conn_iter; 690 struct intel_connector *connector; 691 struct intel_connector *first_changed_connector = NULL; 692 int changed = 0; 693 694 mutex_lock(&i915->drm.mode_config.mutex); 695 696 if (!i915->drm.mode_config.poll_enabled) 697 goto out; 698 699 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 700 for_each_intel_connector_iter(connector, &conn_iter) { 701 if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD)) 702 continue; 703 704 if (intel_hotplug_detect_connector(connector) != INTEL_HOTPLUG_CHANGED) 705 continue; 706 707 changed++; 708 709 if (changed == 1) { 710 drm_connector_get(&connector->base); 711 first_changed_connector = connector; 712 } 713 } 714 drm_connector_list_iter_end(&conn_iter); 715 716 out: 717 mutex_unlock(&i915->drm.mode_config.mutex); 718 719 if (!changed) 720 return; 721 722 if (changed == 1) 723 drm_kms_helper_connector_hotplug_event(&first_changed_connector->base); 724 else 725 drm_kms_helper_hotplug_event(&i915->drm); 726 727 drm_connector_put(&first_changed_connector->base); 728 } 729 730 static void i915_hpd_poll_init_work(struct work_struct *work) 731 { 732 struct drm_i915_private *dev_priv = 733 container_of(work, struct drm_i915_private, 734 display.hotplug.poll_init_work); 735 struct intel_display *display = &dev_priv->display; 736 struct drm_connector_list_iter conn_iter; 737 struct intel_connector *connector; 738 intel_wakeref_t wakeref; 739 bool enabled; 740 741 mutex_lock(&dev_priv->drm.mode_config.mutex); 742 743 enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled); 744 /* 745 * Prevent taking a power reference from this sequence of 746 * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() -> 747 * connector detect which would requeue i915_hpd_poll_init_work() 748 * and so risk an endless loop of this same sequence. 749 */ 750 if (!enabled) { 751 wakeref = intel_display_power_get(display, 752 POWER_DOMAIN_DISPLAY_CORE); 753 drm_WARN_ON(&dev_priv->drm, 754 READ_ONCE(dev_priv->display.hotplug.poll_enabled)); 755 cancel_work(&dev_priv->display.hotplug.poll_init_work); 756 } 757 758 spin_lock_irq(&dev_priv->irq_lock); 759 760 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 761 for_each_intel_connector_iter(connector, &conn_iter) { 762 enum hpd_pin pin; 763 764 pin = intel_connector_hpd_pin(connector); 765 if (pin == HPD_NONE) 766 continue; 767 768 if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) 769 continue; 770 771 connector->base.polled = connector->polled; 772 773 if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD) 774 connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | 775 DRM_CONNECTOR_POLL_DISCONNECT; 776 } 777 drm_connector_list_iter_end(&conn_iter); 778 779 spin_unlock_irq(&dev_priv->irq_lock); 780 781 if (enabled) 782 drm_kms_helper_poll_reschedule(&dev_priv->drm); 783 784 mutex_unlock(&dev_priv->drm.mode_config.mutex); 785 786 /* 787 * We might have missed any hotplugs that happened while we were 788 * in the middle of disabling polling 789 */ 790 if (!enabled) { 791 i915_hpd_poll_detect_connectors(dev_priv); 792 793 intel_display_power_put(display, 794 POWER_DOMAIN_DISPLAY_CORE, 795 wakeref); 796 } 797 } 798 799 /** 800 * intel_hpd_poll_enable - enable polling for connectors with hpd 801 * @dev_priv: i915 device instance 802 * 803 * This function enables polling for all connectors which support HPD. 804 * Under certain conditions HPD may not be functional. On most Intel GPUs, 805 * this happens when we enter runtime suspend. 806 * On Valleyview and Cherryview systems, this also happens when we shut off all 807 * of the powerwells. 808 * 809 * Since this function can get called in contexts where we're already holding 810 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate 811 * worker. 812 * 813 * Also see: intel_hpd_init() and intel_hpd_poll_disable(). 814 */ 815 void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) 816 { 817 struct intel_display *display = &dev_priv->display; 818 819 if (!HAS_DISPLAY(dev_priv) || 820 !intel_display_device_enabled(display)) 821 return; 822 823 WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true); 824 825 /* 826 * We might already be holding dev->mode_config.mutex, so do this in a 827 * separate worker 828 * As well, there's no issue if we race here since we always reschedule 829 * this worker anyway 830 */ 831 spin_lock_irq(&dev_priv->irq_lock); 832 queue_detection_work(dev_priv, 833 &dev_priv->display.hotplug.poll_init_work); 834 spin_unlock_irq(&dev_priv->irq_lock); 835 } 836 837 /** 838 * intel_hpd_poll_disable - disable polling for connectors with hpd 839 * @dev_priv: i915 device instance 840 * 841 * This function disables polling for all connectors which support HPD. 842 * Under certain conditions HPD may not be functional. On most Intel GPUs, 843 * this happens when we enter runtime suspend. 844 * On Valleyview and Cherryview systems, this also happens when we shut off all 845 * of the powerwells. 846 * 847 * Since this function can get called in contexts where we're already holding 848 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate 849 * worker. 850 * 851 * Also used during driver init to initialize connector->polled 852 * appropriately for all connectors. 853 * 854 * Also see: intel_hpd_init() and intel_hpd_poll_enable(). 855 */ 856 void intel_hpd_poll_disable(struct drm_i915_private *dev_priv) 857 { 858 if (!HAS_DISPLAY(dev_priv)) 859 return; 860 861 WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false); 862 863 spin_lock_irq(&dev_priv->irq_lock); 864 queue_detection_work(dev_priv, 865 &dev_priv->display.hotplug.poll_init_work); 866 spin_unlock_irq(&dev_priv->irq_lock); 867 } 868 869 void intel_hpd_poll_fini(struct drm_i915_private *i915) 870 { 871 struct intel_connector *connector; 872 struct drm_connector_list_iter conn_iter; 873 874 /* Kill all the work that may have been queued by hpd. */ 875 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 876 for_each_intel_connector_iter(connector, &conn_iter) { 877 intel_connector_cancel_modeset_retry_work(connector); 878 intel_hdcp_cancel_works(connector); 879 } 880 drm_connector_list_iter_end(&conn_iter); 881 } 882 883 void intel_hpd_init_early(struct drm_i915_private *i915) 884 { 885 INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work, 886 i915_hotplug_work_func); 887 INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func); 888 INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work); 889 INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work, 890 intel_hpd_irq_storm_reenable_work); 891 892 i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 893 /* If we have MST support, we want to avoid doing short HPD IRQ storm 894 * detection, as short HPD storms will occur as a natural part of 895 * sideband messaging with MST. 896 * On older platforms however, IRQ storms can occur with both long and 897 * short pulses, as seen on some G4x systems. 898 */ 899 i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915); 900 } 901 902 static bool cancel_all_detection_work(struct drm_i915_private *i915) 903 { 904 bool was_pending = false; 905 906 if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work)) 907 was_pending = true; 908 if (cancel_work_sync(&i915->display.hotplug.poll_init_work)) 909 was_pending = true; 910 if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work)) 911 was_pending = true; 912 913 return was_pending; 914 } 915 916 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) 917 { 918 if (!HAS_DISPLAY(dev_priv)) 919 return; 920 921 spin_lock_irq(&dev_priv->irq_lock); 922 923 dev_priv->display.hotplug.long_port_mask = 0; 924 dev_priv->display.hotplug.short_port_mask = 0; 925 dev_priv->display.hotplug.event_bits = 0; 926 dev_priv->display.hotplug.retry_bits = 0; 927 928 spin_unlock_irq(&dev_priv->irq_lock); 929 930 cancel_work_sync(&dev_priv->display.hotplug.dig_port_work); 931 932 /* 933 * All other work triggered by hotplug events should be canceled by 934 * now. 935 */ 936 if (cancel_all_detection_work(dev_priv)) 937 drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n"); 938 } 939 940 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) 941 { 942 bool ret = false; 943 944 if (pin == HPD_NONE) 945 return false; 946 947 spin_lock_irq(&dev_priv->irq_lock); 948 if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) { 949 dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; 950 ret = true; 951 } 952 spin_unlock_irq(&dev_priv->irq_lock); 953 954 return ret; 955 } 956 957 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) 958 { 959 if (pin == HPD_NONE) 960 return; 961 962 spin_lock_irq(&dev_priv->irq_lock); 963 dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; 964 spin_unlock_irq(&dev_priv->irq_lock); 965 } 966 967 static void queue_work_for_missed_irqs(struct drm_i915_private *i915) 968 { 969 bool queue_work = false; 970 enum hpd_pin pin; 971 972 lockdep_assert_held(&i915->irq_lock); 973 974 if (i915->display.hotplug.event_bits || 975 i915->display.hotplug.retry_bits) 976 queue_work = true; 977 978 for_each_hpd_pin(pin) { 979 switch (i915->display.hotplug.stats[pin].state) { 980 case HPD_MARK_DISABLED: 981 queue_work = true; 982 break; 983 case HPD_ENABLED: 984 break; 985 default: 986 MISSING_CASE(i915->display.hotplug.stats[pin].state); 987 } 988 } 989 990 if (queue_work) 991 queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); 992 } 993 994 void intel_hpd_enable_detection_work(struct drm_i915_private *i915) 995 { 996 spin_lock_irq(&i915->irq_lock); 997 i915->display.hotplug.detection_work_enabled = true; 998 queue_work_for_missed_irqs(i915); 999 spin_unlock_irq(&i915->irq_lock); 1000 } 1001 1002 void intel_hpd_disable_detection_work(struct drm_i915_private *i915) 1003 { 1004 spin_lock_irq(&i915->irq_lock); 1005 i915->display.hotplug.detection_work_enabled = false; 1006 spin_unlock_irq(&i915->irq_lock); 1007 1008 cancel_all_detection_work(i915); 1009 } 1010 1011 bool intel_hpd_schedule_detection(struct drm_i915_private *i915) 1012 { 1013 unsigned long flags; 1014 bool ret; 1015 1016 spin_lock_irqsave(&i915->irq_lock, flags); 1017 ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); 1018 spin_unlock_irqrestore(&i915->irq_lock, flags); 1019 1020 return ret; 1021 } 1022 1023 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) 1024 { 1025 struct drm_i915_private *dev_priv = m->private; 1026 struct intel_hotplug *hotplug = &dev_priv->display.hotplug; 1027 1028 /* Synchronize with everything first in case there's been an HPD 1029 * storm, but we haven't finished handling it in the kernel yet 1030 */ 1031 intel_synchronize_irq(dev_priv); 1032 flush_work(&dev_priv->display.hotplug.dig_port_work); 1033 flush_delayed_work(&dev_priv->display.hotplug.hotplug_work); 1034 1035 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); 1036 seq_printf(m, "Detected: %s\n", 1037 str_yes_no(delayed_work_pending(&hotplug->reenable_work))); 1038 1039 return 0; 1040 } 1041 1042 static ssize_t i915_hpd_storm_ctl_write(struct file *file, 1043 const char __user *ubuf, size_t len, 1044 loff_t *offp) 1045 { 1046 struct seq_file *m = file->private_data; 1047 struct drm_i915_private *dev_priv = m->private; 1048 struct intel_hotplug *hotplug = &dev_priv->display.hotplug; 1049 unsigned int new_threshold; 1050 int i; 1051 char *newline; 1052 char tmp[16]; 1053 1054 if (len >= sizeof(tmp)) 1055 return -EINVAL; 1056 1057 if (copy_from_user(tmp, ubuf, len)) 1058 return -EFAULT; 1059 1060 tmp[len] = '\0'; 1061 1062 /* Strip newline, if any */ 1063 newline = strchr(tmp, '\n'); 1064 if (newline) 1065 *newline = '\0'; 1066 1067 if (strcmp(tmp, "reset") == 0) 1068 new_threshold = HPD_STORM_DEFAULT_THRESHOLD; 1069 else if (kstrtouint(tmp, 10, &new_threshold) != 0) 1070 return -EINVAL; 1071 1072 if (new_threshold > 0) 1073 drm_dbg_kms(&dev_priv->drm, 1074 "Setting HPD storm detection threshold to %d\n", 1075 new_threshold); 1076 else 1077 drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n"); 1078 1079 spin_lock_irq(&dev_priv->irq_lock); 1080 hotplug->hpd_storm_threshold = new_threshold; 1081 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 1082 for_each_hpd_pin(i) 1083 hotplug->stats[i].count = 0; 1084 spin_unlock_irq(&dev_priv->irq_lock); 1085 1086 /* Re-enable hpd immediately if we were in an irq storm */ 1087 flush_delayed_work(&dev_priv->display.hotplug.reenable_work); 1088 1089 return len; 1090 } 1091 1092 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) 1093 { 1094 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); 1095 } 1096 1097 static const struct file_operations i915_hpd_storm_ctl_fops = { 1098 .owner = THIS_MODULE, 1099 .open = i915_hpd_storm_ctl_open, 1100 .read = seq_read, 1101 .llseek = seq_lseek, 1102 .release = single_release, 1103 .write = i915_hpd_storm_ctl_write 1104 }; 1105 1106 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) 1107 { 1108 struct drm_i915_private *dev_priv = m->private; 1109 1110 seq_printf(m, "Enabled: %s\n", 1111 str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled)); 1112 1113 return 0; 1114 } 1115 1116 static int 1117 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file) 1118 { 1119 return single_open(file, i915_hpd_short_storm_ctl_show, 1120 inode->i_private); 1121 } 1122 1123 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, 1124 const char __user *ubuf, 1125 size_t len, loff_t *offp) 1126 { 1127 struct seq_file *m = file->private_data; 1128 struct drm_i915_private *dev_priv = m->private; 1129 struct intel_hotplug *hotplug = &dev_priv->display.hotplug; 1130 char *newline; 1131 char tmp[16]; 1132 int i; 1133 bool new_state; 1134 1135 if (len >= sizeof(tmp)) 1136 return -EINVAL; 1137 1138 if (copy_from_user(tmp, ubuf, len)) 1139 return -EFAULT; 1140 1141 tmp[len] = '\0'; 1142 1143 /* Strip newline, if any */ 1144 newline = strchr(tmp, '\n'); 1145 if (newline) 1146 *newline = '\0'; 1147 1148 /* Reset to the "default" state for this system */ 1149 if (strcmp(tmp, "reset") == 0) 1150 new_state = !HAS_DP_MST(dev_priv); 1151 else if (kstrtobool(tmp, &new_state) != 0) 1152 return -EINVAL; 1153 1154 drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n", 1155 new_state ? "En" : "Dis"); 1156 1157 spin_lock_irq(&dev_priv->irq_lock); 1158 hotplug->hpd_short_storm_enabled = new_state; 1159 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 1160 for_each_hpd_pin(i) 1161 hotplug->stats[i].count = 0; 1162 spin_unlock_irq(&dev_priv->irq_lock); 1163 1164 /* Re-enable hpd immediately if we were in an irq storm */ 1165 flush_delayed_work(&dev_priv->display.hotplug.reenable_work); 1166 1167 return len; 1168 } 1169 1170 static const struct file_operations i915_hpd_short_storm_ctl_fops = { 1171 .owner = THIS_MODULE, 1172 .open = i915_hpd_short_storm_ctl_open, 1173 .read = seq_read, 1174 .llseek = seq_lseek, 1175 .release = single_release, 1176 .write = i915_hpd_short_storm_ctl_write, 1177 }; 1178 1179 void intel_hpd_debugfs_register(struct drm_i915_private *i915) 1180 { 1181 struct drm_minor *minor = i915->drm.primary; 1182 1183 debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root, 1184 i915, &i915_hpd_storm_ctl_fops); 1185 debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, 1186 i915, &i915_hpd_short_storm_ctl_fops); 1187 debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, 1188 &i915->display.hotplug.ignore_long_hpd); 1189 } 1190