// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_pipe_crc_regs.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"

/* Forward a vblank event on @pipe to the DRM core vblank machinery. */
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * IMR semantics: a bit set in DEIMR masks (disables) that interrupt,
 * hence the inverted @enabled_irq_mask below. The cached irq_mask is
 * only written out (with a posting read to flush) when it changes.
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

/* Unmask @bits in DEIMR. */
void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

/* Mask @bits in DEIMR. */
void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Unlike the DEIMR path there is no cached copy of this register; the
 * current value is read back from hardware and only rewritten on change.
 */
void bdw_update_port_irq(struct drm_i915_private *dev_priv,
			 u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	/* IMR bit set == interrupt masked, hence the inversion */
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Uses the per-pipe software copy de_irq_mask[] as the source of truth
 * and writes GEN8_DE_PIPE_IMR only when the value actually changes.
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->display.irq.de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
		dev_priv->display.irq.de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
				   dev_priv->display.irq.de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

/* Unmask @bits in GEN8_DE_PIPE_IMR for @pipe. */
void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}

/* Mask @bits in GEN8_DE_PIPE_IMR for @pipe. */
void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * South display (PCH) interrupt mask update; read-modify-write of SDEIMR
 * with a posting read to flush the write.
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

/* Unmask @bits in SDEIMR. */
void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

/* Mask @bits in SDEIMR. */
void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}

/*
 * Derive the PIPESTAT enable bits (high 16 bits of the register) from the
 * cached status mask for @pipe, applying the per-platform exceptions below.
 */
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* pre-gen5: enable bits are simply status bits shifted up */
	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/*
	 * The underrun status bit has no enable bit, and the sprite flip
	 * done enable bits don't line up with a plain <<16 shift, so
	 * they're patched up by hand here.
	 */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

/* Enable the PIPESTAT interrupts in @status_mask for @pipe. */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(dev_priv, pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* nothing to do if all requested bits are already enabled */
	if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	/* writing the status bits also acks any pending events */
	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

/* Disable the PIPESTAT interrupts in @status_mask for @pipe. */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(dev_priv, pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm,
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* nothing to do if none of the requested bits are enabled */
	if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

/* ASLE (OpRegion backlight) events only exist on Pineview/mobile parts. */
static bool i915_has_asle(struct drm_i915_private *i915)
{
	if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915))
		return false;

	return intel_opregion_asle_present(i915);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

#if defined(CONFIG_DEBUG_FS)
/* Push one set of CRC results for @pipe into the debugfs CRC queue. */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

/* Deliver a pending page-flip completion event for @pipe, if any. */
static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);

	spin_lock(&i915->drm.event_lock);

	if (crtc->flip_done_event) {
		drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
		crtc->flip_done_event = NULL;
	}

	spin_unlock(&i915->drm.event_lock);
}

/* HSW+: a single CRC result register; remaining slots are zero. */
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_HSW(pipe)),
				     0, 0, 0, 0);
}

/* IVB: five CRC result registers per pipe. */
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

/* Legacy gens: RGB CRC registers plus RES1/RES2 where the hw has them. */
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore,
					 PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore,
					 PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(dev_priv, pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
				     res1, res2);
}

/* Clear all PIPESTAT status bits and drop the cached irq masks. */
void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore,
				   PIPESTAT(dev_priv, pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
	}
}

/*
 * Read and ack the per-pipe PIPESTAT events indicated by @iir, storing the
 * filtered status bits into @pipe_stats for the caller to handle.
 */
void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
			   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display.irq.display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler.
		 */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		/* only look at enabled status bits if this pipe's IIR bit fired */
		if (iir & iir_bit)
			status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(dev_priv, pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

/* Gen2: dispatch the acked PIPESTAT events (vblank, CRC, underrun). */
void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

/* Gen3/4: as i8xx, plus legacy backlight (ASLE) events. */
void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv,
					    pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

/* i965/g4x: start-of-vblank status bit, plus GMBUS events on pipe A's reg. */
void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	/* GMBUS status is reported via pipe A's PIPESTAT */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(dev_priv);
}

/* VLV/CHV: adds plane flip-done events on top of the i965 set. */
void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				     u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(dev_priv);
}

static void
ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 571 { 572 enum pipe pipe; 573 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 574 575 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 576 577 if (pch_iir & SDE_AUDIO_POWER_MASK) { 578 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 579 SDE_AUDIO_POWER_SHIFT); 580 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", 581 port_name(port)); 582 } 583 584 if (pch_iir & SDE_AUX_MASK) 585 intel_dp_aux_irq_handler(dev_priv); 586 587 if (pch_iir & SDE_GMBUS) 588 intel_gmbus_irq_handler(dev_priv); 589 590 if (pch_iir & SDE_AUDIO_HDCP_MASK) 591 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); 592 593 if (pch_iir & SDE_AUDIO_TRANS_MASK) 594 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); 595 596 if (pch_iir & SDE_POISON) 597 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 598 599 if (pch_iir & SDE_FDI_MASK) { 600 for_each_pipe(dev_priv, pipe) 601 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 602 pipe_name(pipe), 603 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 604 } 605 606 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 607 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); 608 609 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 610 drm_dbg(&dev_priv->drm, 611 "PCH transcoder CRC error interrupt\n"); 612 613 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 614 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 615 616 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 617 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 618 } 619 620 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 621 { 622 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); 623 enum pipe pipe; 624 625 if (err_int & ERR_INT_POISON) 626 drm_err(&dev_priv->drm, "Poison interrupt\n"); 627 628 for_each_pipe(dev_priv, pipe) { 629 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 630 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 631 632 if 
		   (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	/* write back to ack the handled error bits */
	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}

/* Decode the CPT south error interrupt register (poison + underruns). */
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}

/* CougarPoint (and later pre-ICP) PCH interrupt dispatch. */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		intel_dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		intel_gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

/* ILK/SNB display engine interrupt dispatch (DEIIR contents in @de_iir). */
void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
	enum pipe pipe;
	u32
	    hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		intel_dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}

	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}

/* IVB/HSW display engine interrupt dispatch (DEIIR contents in @de_iir). */
void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		struct intel_encoder *encoder;

		/* only one eDP PSR instance here, hence the break below */
		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
			u32 psr_iir;

			/* rmw with 0/0 == read + write back to ack */
			psr_iir = intel_uncore_rmw(&dev_priv->uncore,
						   EDP_PSR_IIR, 0, 0);
			intel_psr_irq_handler(intel_dp, psr_iir);
			break;
		}
	}

	if (de_iir &
	    DE_AUX_CHANNEL_A_IVB)
		intel_dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}
}

/* Which GEN8_DE_PORT_IIR bits are AUX channel events on this platform. */
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (DISPLAY_VER(dev_priv) >= 20)
		return 0;
	else if (DISPLAY_VER(dev_priv) >= 14)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB;
	else if (DISPLAY_VER(dev_priv) >= 13)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			XELPD_DE_PORT_AUX_DDID |
			XELPD_DE_PORT_AUX_DDIE |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4;
	else if (DISPLAY_VER(dev_priv) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	/* display ver 8-11: build the mask up incrementally */
	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(dev_priv) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}

/* Which GEN8_DE_PIPE_IIR bits are fault errors on this platform. */
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (DISPLAY_VER(dev_priv) >= 11)
		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (DISPLAY_VER(dev_priv) >= 9)
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

/* Wake anyone waiting on a Punit PM demand response. */
static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.pmdemand.waitqueue);
}

/*
 * Handle GEN8_DE_MISC_IIR events: pmdemand responses (display ver 14+),
 * GSE/OpRegion (older), and PSR. Complains if nothing recognized the IIR.
 */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (DISPLAY_VER(dev_priv) >= 14) {
		if (iir & (XELPDP_PMDEMAND_RSP |
			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
				drm_dbg(&dev_priv->drm,
					"Error waiting for Punit PM Demand Response\n");

			intel_pmdemand_irq_handler(dev_priv);
			found = true;
		}
	} else if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			/* gen12+ has a PSR IIR per transcoder */
			if (DISPLAY_VER(dev_priv) >= 12)
				iir_reg = TRANS_PSR_IIR(dev_priv,
							intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			/* rmw with 0/0 == read + write back to ack */
			psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior GEN12 only have one EDP PSR */
			if (DISPLAY_VER(dev_priv) < 12)
				break;
		}
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}

/* Translate a DSI TE (tearing effect) trigger into a vblank event. */
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val;

	/*
	 * In case of dual link, TE comes from DSI_1,
	 * this is to check if
	 * dual link is enabled
	 */
	val = intel_uncore_read(&dev_priv->uncore,
				TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * if dual link is enabled, then read DSI_0
	 * transcoder registers
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
		PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		/* NOTE(review): "trancoder" typo is in the runtime string; left as-is */
		drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_uncore_read(&dev_priv->uncore,
				TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(&dev_priv->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(dev_priv, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ?
	       PORT_B : PORT_A;
	/* rmw with 0/0 == read + write back to ack the TE bit */
	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
}

/* Which GEN8_DE_PIPE_IIR bit signals primary-plane flip done. */
static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

/* Which GEN8_DE_PIPE_IIR bits signal FIFO underruns on this platform. */
u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
{
	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;

	if (DISPLAY_VER(dev_priv) >= 13)
		mask |= XELPD_PIPE_SOFT_UNDERRUN |
			XELPD_PIPE_HARD_UNDERRUN;

	return mask;
}

/* Read and ack SDEIIR, and on MTL+ the PICA IIR behind it. */
static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
{
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(i915, SDEIIR);
	if (!*pch_iir)
		return;

	/*
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);

		pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
		intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(i915, SDEIIR, *pch_iir);

	if (pica_ier)
		intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
}

/* Top-level gen8+ display engine interrupt dispatch, driven by @master_ctl. */
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
		if (iir) {
			/* ack before handling to avoid losing edge events */
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}
	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				intel_dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (DISPLAY_VER(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err_ratelimited(&dev_priv->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_uncore_read(&dev_priv->uncore,
GEN8_DE_PIPE_IIR(pipe)); 1098 if (!iir) { 1099 drm_err_ratelimited(&dev_priv->drm, 1100 "The master control interrupt lied (DE PIPE)!\n"); 1101 continue; 1102 } 1103 1104 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); 1105 1106 if (iir & GEN8_PIPE_VBLANK) 1107 intel_handle_vblank(dev_priv, pipe); 1108 1109 if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) 1110 flip_done_handler(dev_priv, pipe); 1111 1112 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 1113 hsw_pipe_crc_irq_handler(dev_priv, pipe); 1114 1115 if (iir & gen8_de_pipe_underrun_mask(dev_priv)) 1116 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1117 1118 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); 1119 if (fault_errors) 1120 drm_err_ratelimited(&dev_priv->drm, 1121 "Fault errors on pipe %c: 0x%08x\n", 1122 pipe_name(pipe), 1123 fault_errors); 1124 } 1125 1126 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 1127 master_ctl & GEN8_DE_PCH_IRQ) { 1128 u32 pica_iir; 1129 1130 /* 1131 * FIXME(BDW): Assume for now that the new interrupt handling 1132 * scheme also closed the SDE interrupt handling race we've seen 1133 * on older pch-split platforms. But this needs testing. 1134 */ 1135 gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir); 1136 if (iir) { 1137 if (pica_iir) 1138 xelpdp_pica_irq_handler(dev_priv, pica_iir); 1139 1140 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 1141 icp_irq_handler(dev_priv, iir); 1142 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 1143 spt_irq_handler(dev_priv, iir); 1144 else 1145 cpt_irq_handler(dev_priv, iir); 1146 } else { 1147 /* 1148 * Like on previous PCH there seems to be something 1149 * fishy going on with forwarding PCH interrupts. 
1150 */ 1151 drm_dbg(&dev_priv->drm, 1152 "The master control interrupt lied (SDE)!\n"); 1153 } 1154 } 1155 } 1156 1157 u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) 1158 { 1159 void __iomem * const regs = intel_uncore_regs(&i915->uncore); 1160 u32 iir; 1161 1162 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 1163 return 0; 1164 1165 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 1166 if (likely(iir)) 1167 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 1168 1169 return iir; 1170 } 1171 1172 void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) 1173 { 1174 if (iir & GEN11_GU_MISC_GSE) 1175 intel_opregion_asle_intr(i915); 1176 } 1177 1178 void gen11_display_irq_handler(struct drm_i915_private *i915) 1179 { 1180 void __iomem * const regs = intel_uncore_regs(&i915->uncore); 1181 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 1182 1183 disable_rpm_wakeref_asserts(&i915->runtime_pm); 1184 /* 1185 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 1186 * for the display related bits. 
1187 */ 1188 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); 1189 gen8_de_irq_handler(i915, disp_ctl); 1190 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 1191 GEN11_DISPLAY_IRQ_ENABLE); 1192 1193 enable_rpm_wakeref_asserts(&i915->runtime_pm); 1194 } 1195 1196 /* Called from drm generic code, passed 'crtc' which 1197 * we use as a pipe index 1198 */ 1199 int i8xx_enable_vblank(struct drm_crtc *crtc) 1200 { 1201 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1202 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1203 unsigned long irqflags; 1204 1205 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1206 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 1207 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1208 1209 return 0; 1210 } 1211 1212 int i915gm_enable_vblank(struct drm_crtc *crtc) 1213 { 1214 struct drm_i915_private *i915 = to_i915(crtc->dev); 1215 1216 /* 1217 * Vblank interrupts fail to wake the device up from C2+. 1218 * Disabling render clock gating during C-states avoids 1219 * the problem. There is a small power cost so we do this 1220 * only when vblank interrupts are actually enabled. 
1221 */ 1222 if (i915->display.irq.vblank_enabled++ == 0) 1223 intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 1224 1225 return i8xx_enable_vblank(crtc); 1226 } 1227 1228 int i965_enable_vblank(struct drm_crtc *crtc) 1229 { 1230 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1231 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1232 unsigned long irqflags; 1233 1234 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1235 i915_enable_pipestat(dev_priv, pipe, 1236 PIPE_START_VBLANK_INTERRUPT_STATUS); 1237 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1238 1239 return 0; 1240 } 1241 1242 int ilk_enable_vblank(struct drm_crtc *crtc) 1243 { 1244 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1245 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1246 unsigned long irqflags; 1247 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 1248 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 1249 1250 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1251 ilk_enable_display_irq(dev_priv, bit); 1252 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1253 1254 /* Even though there is no DMC, frame counter can get stuck when 1255 * PSR is active as no frames are generated. 1256 */ 1257 if (HAS_PSR(dev_priv)) 1258 drm_crtc_vblank_restore(crtc); 1259 1260 return 0; 1261 } 1262 1263 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, 1264 bool enable) 1265 { 1266 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 1267 enum port port; 1268 1269 if (!(intel_crtc->mode_flags & 1270 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) 1271 return false; 1272 1273 /* for dual link cases we consider TE from slave */ 1274 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) 1275 port = PORT_B; 1276 else 1277 port = PORT_A; 1278 1279 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, 1280 enable ? 
0 : DSI_TE_EVENT); 1281 1282 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); 1283 1284 return true; 1285 } 1286 1287 int bdw_enable_vblank(struct drm_crtc *_crtc) 1288 { 1289 struct intel_crtc *crtc = to_intel_crtc(_crtc); 1290 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1291 enum pipe pipe = crtc->pipe; 1292 unsigned long irqflags; 1293 1294 if (gen11_dsi_configure_te(crtc, true)) 1295 return 0; 1296 1297 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1298 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 1299 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1300 1301 /* Even if there is no DMC, frame counter can get stuck when 1302 * PSR is active as no frames are generated, so check only for PSR. 1303 */ 1304 if (HAS_PSR(dev_priv)) 1305 drm_crtc_vblank_restore(&crtc->base); 1306 1307 return 0; 1308 } 1309 1310 /* Called from drm generic code, passed 'crtc' which 1311 * we use as a pipe index 1312 */ 1313 void i8xx_disable_vblank(struct drm_crtc *crtc) 1314 { 1315 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1316 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1317 unsigned long irqflags; 1318 1319 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1320 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 1321 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1322 } 1323 1324 void i915gm_disable_vblank(struct drm_crtc *crtc) 1325 { 1326 struct drm_i915_private *i915 = to_i915(crtc->dev); 1327 1328 i8xx_disable_vblank(crtc); 1329 1330 if (--i915->display.irq.vblank_enabled == 0) 1331 intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 1332 } 1333 1334 void i965_disable_vblank(struct drm_crtc *crtc) 1335 { 1336 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1337 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1338 unsigned long irqflags; 1339 1340 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1341 i915_disable_pipestat(dev_priv, pipe, 
1342 PIPE_START_VBLANK_INTERRUPT_STATUS); 1343 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1344 } 1345 1346 void ilk_disable_vblank(struct drm_crtc *crtc) 1347 { 1348 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1349 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1350 unsigned long irqflags; 1351 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 1352 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 1353 1354 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1355 ilk_disable_display_irq(dev_priv, bit); 1356 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1357 } 1358 1359 void bdw_disable_vblank(struct drm_crtc *_crtc) 1360 { 1361 struct intel_crtc *crtc = to_intel_crtc(_crtc); 1362 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1363 enum pipe pipe = crtc->pipe; 1364 unsigned long irqflags; 1365 1366 if (gen11_dsi_configure_te(crtc, false)) 1367 return; 1368 1369 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1370 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 1371 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1372 } 1373 1374 void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 1375 { 1376 struct intel_uncore *uncore = &dev_priv->uncore; 1377 1378 if (IS_CHERRYVIEW(dev_priv)) 1379 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 1380 else 1381 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); 1382 1383 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 1384 intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0); 1385 1386 i9xx_pipestat_irq_reset(dev_priv); 1387 1388 GEN3_IRQ_RESET(uncore, VLV_); 1389 dev_priv->irq_mask = ~0u; 1390 } 1391 1392 void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 1393 { 1394 struct intel_uncore *uncore = &dev_priv->uncore; 1395 1396 u32 pipestat_mask; 1397 u32 enable_mask; 1398 enum pipe pipe; 1399 1400 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 1401 1402 i915_enable_pipestat(dev_priv, PIPE_A, 
PIPE_GMBUS_INTERRUPT_STATUS); 1403 for_each_pipe(dev_priv, pipe) 1404 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 1405 1406 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 1407 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 1408 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 1409 I915_LPE_PIPE_A_INTERRUPT | 1410 I915_LPE_PIPE_B_INTERRUPT; 1411 1412 if (IS_CHERRYVIEW(dev_priv)) 1413 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 1414 I915_LPE_PIPE_C_INTERRUPT; 1415 1416 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); 1417 1418 dev_priv->irq_mask = ~enable_mask; 1419 1420 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 1421 } 1422 1423 void gen8_display_irq_reset(struct drm_i915_private *dev_priv) 1424 { 1425 struct intel_uncore *uncore = &dev_priv->uncore; 1426 enum pipe pipe; 1427 1428 if (!HAS_DISPLAY(dev_priv)) 1429 return; 1430 1431 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 1432 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 1433 1434 for_each_pipe(dev_priv, pipe) 1435 if (intel_display_power_is_enabled(dev_priv, 1436 POWER_DOMAIN_PIPE(pipe))) 1437 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 1438 1439 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 1440 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 1441 } 1442 1443 void gen11_display_irq_reset(struct drm_i915_private *dev_priv) 1444 { 1445 struct intel_uncore *uncore = &dev_priv->uncore; 1446 enum pipe pipe; 1447 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 1448 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 1449 1450 if (!HAS_DISPLAY(dev_priv)) 1451 return; 1452 1453 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); 1454 1455 if (DISPLAY_VER(dev_priv) >= 12) { 1456 enum transcoder trans; 1457 1458 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 1459 enum intel_display_power_domain domain; 1460 1461 domain = POWER_DOMAIN_TRANSCODER(trans); 1462 if (!intel_display_power_is_enabled(dev_priv, domain)) 1463 continue; 1464 1465 intel_uncore_write(uncore, 1466 
TRANS_PSR_IMR(dev_priv, trans), 1467 0xffffffff); 1468 intel_uncore_write(uncore, 1469 TRANS_PSR_IIR(dev_priv, trans), 1470 0xffffffff); 1471 } 1472 } else { 1473 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 1474 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 1475 } 1476 1477 for_each_pipe(dev_priv, pipe) 1478 if (intel_display_power_is_enabled(dev_priv, 1479 POWER_DOMAIN_PIPE(pipe))) 1480 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 1481 1482 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 1483 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 1484 1485 if (DISPLAY_VER(dev_priv) >= 14) 1486 GEN3_IRQ_RESET(uncore, PICAINTERRUPT_); 1487 else 1488 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 1489 1490 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 1491 GEN3_IRQ_RESET(uncore, SDE); 1492 } 1493 1494 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 1495 u8 pipe_mask) 1496 { 1497 struct intel_uncore *uncore = &dev_priv->uncore; 1498 u32 extra_ier = GEN8_PIPE_VBLANK | 1499 gen8_de_pipe_underrun_mask(dev_priv) | 1500 gen8_de_pipe_flip_done_mask(dev_priv); 1501 enum pipe pipe; 1502 1503 spin_lock_irq(&dev_priv->irq_lock); 1504 1505 if (!intel_irqs_enabled(dev_priv)) { 1506 spin_unlock_irq(&dev_priv->irq_lock); 1507 return; 1508 } 1509 1510 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 1511 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 1512 dev_priv->display.irq.de_irq_mask[pipe], 1513 ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier); 1514 1515 spin_unlock_irq(&dev_priv->irq_lock); 1516 } 1517 1518 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 1519 u8 pipe_mask) 1520 { 1521 struct intel_uncore *uncore = &dev_priv->uncore; 1522 enum pipe pipe; 1523 1524 spin_lock_irq(&dev_priv->irq_lock); 1525 1526 if (!intel_irqs_enabled(dev_priv)) { 1527 spin_unlock_irq(&dev_priv->irq_lock); 1528 return; 1529 } 1530 1531 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 1532 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 1533 1534 
spin_unlock_irq(&dev_priv->irq_lock); 1535 1536 /* make sure we're done processing display irqs */ 1537 intel_synchronize_irq(dev_priv); 1538 } 1539 1540 /* 1541 * SDEIER is also touched by the interrupt handler to work around missed PCH 1542 * interrupts. Hence we can't update it after the interrupt handler is enabled - 1543 * instead we unconditionally enable all PCH interrupt sources here, but then 1544 * only unmask them as needed with SDEIMR. 1545 * 1546 * Note that we currently do this after installing the interrupt handler, 1547 * but before we enable the master interrupt. That should be sufficient 1548 * to avoid races with the irq handler, assuming we have MSI. Shared legacy 1549 * interrupts could still race. 1550 */ 1551 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) 1552 { 1553 struct intel_uncore *uncore = &dev_priv->uncore; 1554 u32 mask; 1555 1556 if (HAS_PCH_NOP(dev_priv)) 1557 return; 1558 1559 if (HAS_PCH_IBX(dev_priv)) 1560 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 1561 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 1562 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 1563 else 1564 mask = SDE_GMBUS_CPT; 1565 1566 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); 1567 } 1568 1569 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 1570 { 1571 lockdep_assert_held(&dev_priv->irq_lock); 1572 1573 if (dev_priv->display.irq.display_irqs_enabled) 1574 return; 1575 1576 dev_priv->display.irq.display_irqs_enabled = true; 1577 1578 if (intel_irqs_enabled(dev_priv)) { 1579 vlv_display_irq_reset(dev_priv); 1580 vlv_display_irq_postinstall(dev_priv); 1581 } 1582 } 1583 1584 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 1585 { 1586 lockdep_assert_held(&dev_priv->irq_lock); 1587 1588 if (!dev_priv->display.irq.display_irqs_enabled) 1589 return; 1590 1591 dev_priv->display.irq.display_irqs_enabled = false; 1592 1593 if (intel_irqs_enabled(dev_priv)) 1594 vlv_display_irq_reset(dev_priv); 1595 } 

/* Program ILK/SNB/IVB/HSW display engine interrupt masks and enables. */
void ilk_de_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 display_mask, extra_mask;

	if (DISPLAY_VER(i915) >= 7) {
		/* IVB/HSW bit layout. */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		/* ILK/SNB bit layout. */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(i915)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(i915))
		extra_mask |= DE_PCU_EVENT;

	/* display_mask bits are unmasked; extra_mask bits are enabled only. */
	i915->irq_mask = ~display_mask;

	ibx_irq_postinstall(i915);

	GEN3_IRQ_INIT(uncore, DE, i915->irq_mask,
		      display_mask | extra_mask);
}

static void mtp_irq_postinstall(struct drm_i915_private *i915);
static void icp_irq_postinstall(struct drm_i915_private *i915);

/* Program GEN8+ display engine interrupt masks and enables. */
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* PCH/PICA side first, picked by platform generation. */
	if (DISPLAY_VER(dev_priv) >= 14)
		mtp_irq_postinstall(dev_priv);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	if (DISPLAY_VER(dev_priv) < 11)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 14) {
		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
				  XELPDP_PMDEMAND_RSP;
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		/* TE events are only needed when a DSI panel is present. */
		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		/* Only probe PSR IIRs whose transcoder power domain is up. */
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore,
						TRANS_PSR_IIR(dev_priv, trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->display.irq.de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}

/* MTL+: program the PICA hotplug block and the south display engine. */
static void mtp_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
			     XELPDP_TBT_HOTPLUG_MASK;

	GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask,
		      de_hpd_enables);

	GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
}

/* ICP+ PCH: unmask GMBUS only, enable all sources (see ibx comment). */
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

/* GEN11: program DE interrupts, then turn on the display interrupt control. */
void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

/* DG1: same sequence as gen11_de_irq_postinstall(). */
void dg1_de_irq_postinstall(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	gen8_de_irq_postinstall(i915);
	intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

/* One-time display interrupt state setup at driver init. */
void intel_display_irq_init(struct drm_i915_private *i915)
{
	i915->drm.vblank_disable_immediate = true;

	/*
	 * Most platforms treat the display irq block as an always-on power
	 * domain. vlv/chv can disable it at runtime and need special care to
	 * avoid writing any of the display block registers outside of the power
	 * domain. We defer setting up the display irqs in this case to the
	 * runtime pm.
	 */
	i915->display.irq.display_irqs_enabled = true;
	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		i915->display.irq.display_irqs_enabled = false;

	intel_hotplug_irq_init(i915);
}