// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_pipe_crc_regs.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"

/* Forward a pipe's vblank interrupt to the drm vblank machinery. */
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	/*
	 * IMR semantics: a set bit masks (disables) the interrupt, so the
	 * bits in @interrupt_mask that are NOT in @enabled_irq_mask get set.
	 */
	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Only touch the hardware when the shadow mask actually changes. */
	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		/* posting read to flush the IMR write */
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_port_irq(struct drm_i915_private *dev_priv,
			 u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	/* No shadow copy is kept for this register: read-modify-write. */
	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		/* posting read to flush the IMR write */
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	/* Per-pipe shadow of the IMR; compare against it to skip no-op writes. */
	new_val = dev_priv->display.irq.de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
		dev_priv->display.irq.de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
				   dev_priv->display.irq.de_irq_mask[pipe]);
		/* posting read to flush the IMR write */
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915, 134 enum pipe pipe, u32 bits) 135 { 136 bdw_update_pipe_irq(i915, pipe, bits, bits); 137 } 138 139 void bdw_disable_pipe_irq(struct drm_i915_private *i915, 140 enum pipe pipe, u32 bits) 141 { 142 bdw_update_pipe_irq(i915, pipe, bits, 0); 143 } 144 145 /** 146 * ibx_display_interrupt_update - update SDEIMR 147 * @dev_priv: driver private 148 * @interrupt_mask: mask of interrupt bits to update 149 * @enabled_irq_mask: mask of interrupt bits to enable 150 */ 151 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 152 u32 interrupt_mask, 153 u32 enabled_irq_mask) 154 { 155 u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR); 156 157 sdeimr &= ~interrupt_mask; 158 sdeimr |= (~enabled_irq_mask & interrupt_mask); 159 160 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 161 162 lockdep_assert_held(&dev_priv->irq_lock); 163 164 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 165 return; 166 167 intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr); 168 intel_uncore_posting_read(&dev_priv->uncore, SDEIMR); 169 } 170 171 void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits) 172 { 173 ibx_display_interrupt_update(i915, bits, bits); 174 } 175 176 void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits) 177 { 178 ibx_display_interrupt_update(i915, bits, 0); 179 } 180 181 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 182 enum pipe pipe) 183 { 184 u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe]; 185 u32 enable_mask = status_mask << 16; 186 187 lockdep_assert_held(&dev_priv->irq_lock); 188 189 if (DISPLAY_VER(dev_priv) < 5) 190 goto out; 191 192 /* 193 * On pipe A we don't support the PSR interrupt yet, 194 * on pipe B and C the same bit MBZ. 
195 */ 196 if (drm_WARN_ON_ONCE(&dev_priv->drm, 197 status_mask & PIPE_A_PSR_STATUS_VLV)) 198 return 0; 199 /* 200 * On pipe B and C we don't support the PSR interrupt yet, on pipe 201 * A the same bit is for perf counters which we don't use either. 202 */ 203 if (drm_WARN_ON_ONCE(&dev_priv->drm, 204 status_mask & PIPE_B_PSR_STATUS_VLV)) 205 return 0; 206 207 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 208 SPRITE0_FLIP_DONE_INT_EN_VLV | 209 SPRITE1_FLIP_DONE_INT_EN_VLV); 210 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 211 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 212 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 213 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 214 215 out: 216 drm_WARN_ONCE(&dev_priv->drm, 217 enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 218 status_mask & ~PIPESTAT_INT_STATUS_MASK, 219 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 220 pipe_name(pipe), enable_mask, status_mask); 221 222 return enable_mask; 223 } 224 225 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 226 enum pipe pipe, u32 status_mask) 227 { 228 i915_reg_t reg = PIPESTAT(dev_priv, pipe); 229 u32 enable_mask; 230 231 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 232 "pipe %c: status_mask=0x%x\n", 233 pipe_name(pipe), status_mask); 234 235 lockdep_assert_held(&dev_priv->irq_lock); 236 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); 237 238 if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask) 239 return; 240 241 dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask; 242 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 243 244 intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); 245 intel_uncore_posting_read(&dev_priv->uncore, reg); 246 } 247 248 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 249 enum pipe pipe, u32 status_mask) 250 { 251 i915_reg_t reg = PIPESTAT(dev_priv, pipe); 252 u32 enable_mask; 253 254 drm_WARN_ONCE(&dev_priv->drm, 
status_mask & ~PIPESTAT_INT_STATUS_MASK, 255 "pipe %c: status_mask=0x%x\n", 256 pipe_name(pipe), status_mask); 257 258 lockdep_assert_held(&dev_priv->irq_lock); 259 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); 260 261 if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0) 262 return; 263 264 dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask; 265 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 266 267 intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); 268 intel_uncore_posting_read(&dev_priv->uncore, reg); 269 } 270 271 static bool i915_has_asle(struct drm_i915_private *i915) 272 { 273 struct intel_display *display = &i915->display; 274 275 if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915)) 276 return false; 277 278 return intel_opregion_asle_present(display); 279 } 280 281 /** 282 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 283 * @dev_priv: i915 device private 284 */ 285 void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 286 { 287 if (!i915_has_asle(dev_priv)) 288 return; 289 290 spin_lock_irq(&dev_priv->irq_lock); 291 292 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 293 if (DISPLAY_VER(dev_priv) >= 4) 294 i915_enable_pipestat(dev_priv, PIPE_A, 295 PIPE_LEGACY_BLC_EVENT_STATUS); 296 297 spin_unlock_irq(&dev_priv->irq_lock); 298 } 299 300 #if defined(CONFIG_DEBUG_FS) 301 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 302 enum pipe pipe, 303 u32 crc0, u32 crc1, 304 u32 crc2, u32 crc3, 305 u32 crc4) 306 { 307 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); 308 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; 309 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; 310 311 trace_intel_pipe_crc(crtc, crcs); 312 313 spin_lock(&pipe_crc->lock); 314 /* 315 * For some not yet identified reason, the first CRC is 316 * bonkers. So let's just wait for the next vblank and read 317 * out the buggy result. 
318 * 319 * On GEN8+ sometimes the second CRC is bonkers as well, so 320 * don't trust that one either. 321 */ 322 if (pipe_crc->skipped <= 0 || 323 (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 324 pipe_crc->skipped++; 325 spin_unlock(&pipe_crc->lock); 326 return; 327 } 328 spin_unlock(&pipe_crc->lock); 329 330 drm_crtc_add_crc_entry(&crtc->base, true, 331 drm_crtc_accurate_vblank_count(&crtc->base), 332 crcs); 333 } 334 #else 335 static inline void 336 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 337 enum pipe pipe, 338 u32 crc0, u32 crc1, 339 u32 crc2, u32 crc3, 340 u32 crc4) {} 341 #endif 342 343 static void flip_done_handler(struct drm_i915_private *i915, 344 enum pipe pipe) 345 { 346 struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); 347 348 spin_lock(&i915->drm.event_lock); 349 350 if (crtc->flip_done_event) { 351 trace_intel_crtc_flip_done(crtc); 352 drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event); 353 crtc->flip_done_event = NULL; 354 } 355 356 spin_unlock(&i915->drm.event_lock); 357 } 358 359 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 360 enum pipe pipe) 361 { 362 display_pipe_crc_irq_handler(dev_priv, pipe, 363 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_HSW(pipe)), 364 0, 0, 0, 0); 365 } 366 367 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 368 enum pipe pipe) 369 { 370 display_pipe_crc_irq_handler(dev_priv, pipe, 371 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), 372 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)), 373 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)), 374 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)), 375 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe))); 376 } 377 378 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 379 enum pipe pipe) 380 { 381 u32 res1, res2; 382 383 if (DISPLAY_VER(dev_priv) >= 3) 384 res1 = 
intel_uncore_read(&dev_priv->uncore, 385 PIPE_CRC_RES_RES1_I915(dev_priv, pipe)); 386 else 387 res1 = 0; 388 389 if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) 390 res2 = intel_uncore_read(&dev_priv->uncore, 391 PIPE_CRC_RES_RES2_G4X(dev_priv, pipe)); 392 else 393 res2 = 0; 394 395 display_pipe_crc_irq_handler(dev_priv, pipe, 396 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(dev_priv, pipe)), 397 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(dev_priv, pipe)), 398 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(dev_priv, pipe)), 399 res1, res2); 400 } 401 402 void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 403 { 404 enum pipe pipe; 405 406 for_each_pipe(dev_priv, pipe) { 407 intel_uncore_write(&dev_priv->uncore, 408 PIPESTAT(dev_priv, pipe), 409 PIPESTAT_INT_STATUS_MASK | 410 PIPE_FIFO_UNDERRUN_STATUS); 411 412 dev_priv->display.irq.pipestat_irq_mask[pipe] = 0; 413 } 414 } 415 416 void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 417 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 418 { 419 enum pipe pipe; 420 421 spin_lock(&dev_priv->irq_lock); 422 423 if (!dev_priv->display.irq.display_irqs_enabled) { 424 spin_unlock(&dev_priv->irq_lock); 425 return; 426 } 427 428 for_each_pipe(dev_priv, pipe) { 429 i915_reg_t reg; 430 u32 status_mask, enable_mask, iir_bit = 0; 431 432 /* 433 * PIPESTAT bits get signalled even when the interrupt is 434 * disabled with the mask bits, and some of the status bits do 435 * not generate interrupts at all (like the underrun bit). Hence 436 * we need to be careful that we only handle what we want to 437 * handle. 438 */ 439 440 /* fifo underruns are filterered in the underrun handler. 
*/ 441 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 442 443 switch (pipe) { 444 default: 445 case PIPE_A: 446 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 447 break; 448 case PIPE_B: 449 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 450 break; 451 case PIPE_C: 452 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 453 break; 454 } 455 if (iir & iir_bit) 456 status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe]; 457 458 if (!status_mask) 459 continue; 460 461 reg = PIPESTAT(dev_priv, pipe); 462 pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask; 463 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 464 465 /* 466 * Clear the PIPE*STAT regs before the IIR 467 * 468 * Toggle the enable bits to make sure we get an 469 * edge in the ISR pipe event bit if we don't clear 470 * all the enabled status bits. Otherwise the edge 471 * triggered IIR on i965/g4x wouldn't notice that 472 * an interrupt is still pending. 473 */ 474 if (pipe_stats[pipe]) { 475 intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]); 476 intel_uncore_write(&dev_priv->uncore, reg, enable_mask); 477 } 478 } 479 spin_unlock(&dev_priv->irq_lock); 480 } 481 482 void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 483 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 484 { 485 enum pipe pipe; 486 487 for_each_pipe(dev_priv, pipe) { 488 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 489 intel_handle_vblank(dev_priv, pipe); 490 491 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 492 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 493 494 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 495 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 496 } 497 } 498 499 void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 500 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 501 { 502 struct intel_display *display = &dev_priv->display; 503 504 bool blc_event = false; 505 enum pipe pipe; 506 507 for_each_pipe(dev_priv, pipe) { 508 if (pipe_stats[pipe] & 
PIPE_VBLANK_INTERRUPT_STATUS) 509 intel_handle_vblank(dev_priv, pipe); 510 511 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 512 blc_event = true; 513 514 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 515 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 516 517 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 518 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 519 } 520 521 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 522 intel_opregion_asle_intr(display); 523 } 524 525 void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 526 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 527 { 528 struct intel_display *display = &dev_priv->display; 529 bool blc_event = false; 530 enum pipe pipe; 531 532 for_each_pipe(dev_priv, pipe) { 533 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 534 intel_handle_vblank(dev_priv, pipe); 535 536 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 537 blc_event = true; 538 539 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 540 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 541 542 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 543 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 544 } 545 546 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 547 intel_opregion_asle_intr(display); 548 549 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 550 intel_gmbus_irq_handler(dev_priv); 551 } 552 553 void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 554 u32 pipe_stats[I915_MAX_PIPES]) 555 { 556 enum pipe pipe; 557 558 for_each_pipe(dev_priv, pipe) { 559 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 560 intel_handle_vblank(dev_priv, pipe); 561 562 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 563 flip_done_handler(dev_priv, pipe); 564 565 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 566 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 567 568 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 569 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 570 } 571 572 if 
(pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 573 intel_gmbus_irq_handler(dev_priv); 574 } 575 576 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 577 { 578 enum pipe pipe; 579 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 580 581 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 582 583 if (pch_iir & SDE_AUDIO_POWER_MASK) { 584 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 585 SDE_AUDIO_POWER_SHIFT); 586 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", 587 port_name(port)); 588 } 589 590 if (pch_iir & SDE_AUX_MASK) 591 intel_dp_aux_irq_handler(dev_priv); 592 593 if (pch_iir & SDE_GMBUS) 594 intel_gmbus_irq_handler(dev_priv); 595 596 if (pch_iir & SDE_AUDIO_HDCP_MASK) 597 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); 598 599 if (pch_iir & SDE_AUDIO_TRANS_MASK) 600 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); 601 602 if (pch_iir & SDE_POISON) 603 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 604 605 if (pch_iir & SDE_FDI_MASK) { 606 for_each_pipe(dev_priv, pipe) 607 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 608 pipe_name(pipe), 609 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 610 } 611 612 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 613 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); 614 615 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 616 drm_dbg(&dev_priv->drm, 617 "PCH transcoder CRC error interrupt\n"); 618 619 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 620 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 621 622 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 623 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 624 } 625 626 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 627 { 628 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); 629 enum pipe pipe; 630 631 if (err_int & ERR_INT_POISON) 632 drm_err(&dev_priv->drm, "Poison interrupt\n"); 633 634 for_each_pipe(dev_priv, pipe) { 635 if 
(err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 636 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 637 638 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 639 if (IS_IVYBRIDGE(dev_priv)) 640 ivb_pipe_crc_irq_handler(dev_priv, pipe); 641 else 642 hsw_pipe_crc_irq_handler(dev_priv, pipe); 643 } 644 } 645 646 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); 647 } 648 649 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 650 { 651 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); 652 enum pipe pipe; 653 654 if (serr_int & SERR_INT_POISON) 655 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 656 657 for_each_pipe(dev_priv, pipe) 658 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 659 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 660 661 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); 662 } 663 664 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 665 { 666 enum pipe pipe; 667 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 668 669 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 670 671 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 672 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 673 SDE_AUDIO_POWER_SHIFT_CPT); 674 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", 675 port_name(port)); 676 } 677 678 if (pch_iir & SDE_AUX_MASK_CPT) 679 intel_dp_aux_irq_handler(dev_priv); 680 681 if (pch_iir & SDE_GMBUS_CPT) 682 intel_gmbus_irq_handler(dev_priv); 683 684 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 685 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); 686 687 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 688 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); 689 690 if (pch_iir & SDE_FDI_MASK_CPT) { 691 for_each_pipe(dev_priv, pipe) 692 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 693 pipe_name(pipe), 694 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 695 } 696 697 if (pch_iir & SDE_ERROR_CPT) 698 cpt_serr_int_handler(dev_priv); 699 } 700 701 void 
ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) 702 { 703 struct intel_display *display = &dev_priv->display; 704 enum pipe pipe; 705 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 706 707 if (hotplug_trigger) 708 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 709 710 if (de_iir & DE_AUX_CHANNEL_A) 711 intel_dp_aux_irq_handler(dev_priv); 712 713 if (de_iir & DE_GSE) 714 intel_opregion_asle_intr(display); 715 716 if (de_iir & DE_POISON) 717 drm_err(&dev_priv->drm, "Poison interrupt\n"); 718 719 for_each_pipe(dev_priv, pipe) { 720 if (de_iir & DE_PIPE_VBLANK(pipe)) 721 intel_handle_vblank(dev_priv, pipe); 722 723 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 724 flip_done_handler(dev_priv, pipe); 725 726 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 727 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 728 729 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 730 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 731 } 732 733 /* check event from PCH */ 734 if (de_iir & DE_PCH_EVENT) { 735 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 736 737 if (HAS_PCH_CPT(dev_priv)) 738 cpt_irq_handler(dev_priv, pch_iir); 739 else 740 ibx_irq_handler(dev_priv, pch_iir); 741 742 /* should clear PCH hotplug event before clear CPU irq */ 743 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); 744 } 745 746 if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT) 747 gen5_rps_irq_handler(&to_gt(dev_priv)->rps); 748 } 749 750 void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) 751 { 752 struct intel_display *display = &dev_priv->display; 753 enum pipe pipe; 754 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 755 756 if (hotplug_trigger) 757 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 758 759 if (de_iir & DE_ERR_INT_IVB) 760 ivb_err_int_handler(dev_priv); 761 762 if (de_iir & DE_EDP_PSR_INT_HSW) { 763 struct intel_encoder *encoder; 764 765 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 766 struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); 767 u32 psr_iir; 768 769 psr_iir = intel_uncore_rmw(&dev_priv->uncore, 770 EDP_PSR_IIR, 0, 0); 771 intel_psr_irq_handler(intel_dp, psr_iir); 772 break; 773 } 774 } 775 776 if (de_iir & DE_AUX_CHANNEL_A_IVB) 777 intel_dp_aux_irq_handler(dev_priv); 778 779 if (de_iir & DE_GSE_IVB) 780 intel_opregion_asle_intr(display); 781 782 for_each_pipe(dev_priv, pipe) { 783 if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) 784 intel_handle_vblank(dev_priv, pipe); 785 786 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 787 flip_done_handler(dev_priv, pipe); 788 } 789 790 /* check event from PCH */ 791 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 792 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 793 794 cpt_irq_handler(dev_priv, pch_iir); 795 796 /* clear PCH hotplug event before clear CPU irq */ 797 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); 798 } 799 } 800 801 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) 802 { 803 u32 mask; 804 805 if (DISPLAY_VER(dev_priv) >= 20) 806 return 0; 807 else if (DISPLAY_VER(dev_priv) >= 14) 808 return TGL_DE_PORT_AUX_DDIA | 809 TGL_DE_PORT_AUX_DDIB; 810 else if (DISPLAY_VER(dev_priv) >= 13) 811 return TGL_DE_PORT_AUX_DDIA | 812 TGL_DE_PORT_AUX_DDIB | 813 TGL_DE_PORT_AUX_DDIC | 814 XELPD_DE_PORT_AUX_DDID | 815 XELPD_DE_PORT_AUX_DDIE | 816 TGL_DE_PORT_AUX_USBC1 | 817 TGL_DE_PORT_AUX_USBC2 | 818 TGL_DE_PORT_AUX_USBC3 | 819 TGL_DE_PORT_AUX_USBC4; 820 else if (DISPLAY_VER(dev_priv) >= 12) 821 return TGL_DE_PORT_AUX_DDIA | 822 TGL_DE_PORT_AUX_DDIB | 823 TGL_DE_PORT_AUX_DDIC | 824 TGL_DE_PORT_AUX_USBC1 | 825 TGL_DE_PORT_AUX_USBC2 | 826 TGL_DE_PORT_AUX_USBC3 | 827 TGL_DE_PORT_AUX_USBC4 | 828 TGL_DE_PORT_AUX_USBC5 | 829 TGL_DE_PORT_AUX_USBC6; 830 831 mask = GEN8_AUX_CHANNEL_A; 832 if (DISPLAY_VER(dev_priv) >= 9) 833 mask |= GEN9_AUX_CHANNEL_B | 834 GEN9_AUX_CHANNEL_C | 835 GEN9_AUX_CHANNEL_D; 836 837 if (DISPLAY_VER(dev_priv) == 11) { 838 mask |= ICL_AUX_CHANNEL_F; 839 mask |= 
ICL_AUX_CHANNEL_E; 840 } 841 842 return mask; 843 } 844 845 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) 846 { 847 if (DISPLAY_VER(dev_priv) >= 14) 848 return MTL_PIPEDMC_ATS_FAULT | 849 MTL_PLANE_ATS_FAULT | 850 GEN12_PIPEDMC_FAULT | 851 GEN9_PIPE_CURSOR_FAULT | 852 GEN11_PIPE_PLANE5_FAULT | 853 GEN9_PIPE_PLANE4_FAULT | 854 GEN9_PIPE_PLANE3_FAULT | 855 GEN9_PIPE_PLANE2_FAULT | 856 GEN9_PIPE_PLANE1_FAULT; 857 if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv)) 858 return GEN12_PIPEDMC_FAULT | 859 GEN9_PIPE_CURSOR_FAULT | 860 GEN11_PIPE_PLANE5_FAULT | 861 GEN9_PIPE_PLANE4_FAULT | 862 GEN9_PIPE_PLANE3_FAULT | 863 GEN9_PIPE_PLANE2_FAULT | 864 GEN9_PIPE_PLANE1_FAULT; 865 else if (DISPLAY_VER(dev_priv) == 12) 866 return GEN12_PIPEDMC_FAULT | 867 GEN9_PIPE_CURSOR_FAULT | 868 GEN11_PIPE_PLANE7_FAULT | 869 GEN11_PIPE_PLANE6_FAULT | 870 GEN11_PIPE_PLANE5_FAULT | 871 GEN9_PIPE_PLANE4_FAULT | 872 GEN9_PIPE_PLANE3_FAULT | 873 GEN9_PIPE_PLANE2_FAULT | 874 GEN9_PIPE_PLANE1_FAULT; 875 else if (DISPLAY_VER(dev_priv) == 11) 876 return GEN9_PIPE_CURSOR_FAULT | 877 GEN11_PIPE_PLANE7_FAULT | 878 GEN11_PIPE_PLANE6_FAULT | 879 GEN11_PIPE_PLANE5_FAULT | 880 GEN9_PIPE_PLANE4_FAULT | 881 GEN9_PIPE_PLANE3_FAULT | 882 GEN9_PIPE_PLANE2_FAULT | 883 GEN9_PIPE_PLANE1_FAULT; 884 else if (DISPLAY_VER(dev_priv) >= 9) 885 return GEN9_PIPE_CURSOR_FAULT | 886 GEN9_PIPE_PLANE4_FAULT | 887 GEN9_PIPE_PLANE3_FAULT | 888 GEN9_PIPE_PLANE2_FAULT | 889 GEN9_PIPE_PLANE1_FAULT; 890 else 891 return GEN8_PIPE_CURSOR_FAULT | 892 GEN8_PIPE_SPRITE_FAULT | 893 GEN8_PIPE_PRIMARY_FAULT; 894 } 895 896 static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv) 897 { 898 wake_up_all(&dev_priv->display.pmdemand.waitqueue); 899 } 900 901 static void 902 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 903 { 904 struct intel_display *display = &dev_priv->display; 905 bool found = false; 906 907 if (DISPLAY_VER(dev_priv) >= 14) { 908 if (iir & 
(XELPDP_PMDEMAND_RSP | 909 XELPDP_PMDEMAND_RSPTOUT_ERR)) { 910 if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR) 911 drm_dbg(&dev_priv->drm, 912 "Error waiting for Punit PM Demand Response\n"); 913 914 intel_pmdemand_irq_handler(dev_priv); 915 found = true; 916 } 917 918 if (iir & XELPDP_RM_TIMEOUT) { 919 u32 val = intel_uncore_read(&dev_priv->uncore, 920 RM_TIMEOUT_REG_CAPTURE); 921 drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val); 922 found = true; 923 } 924 } else if (iir & GEN8_DE_MISC_GSE) { 925 intel_opregion_asle_intr(display); 926 found = true; 927 } 928 929 if (iir & GEN8_DE_EDP_PSR) { 930 struct intel_encoder *encoder; 931 u32 psr_iir; 932 i915_reg_t iir_reg; 933 934 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 935 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 936 937 if (DISPLAY_VER(dev_priv) >= 12) 938 iir_reg = TRANS_PSR_IIR(dev_priv, 939 intel_dp->psr.transcoder); 940 else 941 iir_reg = EDP_PSR_IIR; 942 943 psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0); 944 945 if (psr_iir) 946 found = true; 947 948 intel_psr_irq_handler(intel_dp, psr_iir); 949 950 /* prior GEN12 only have one EDP PSR */ 951 if (DISPLAY_VER(dev_priv) < 12) 952 break; 953 } 954 } 955 956 if (!found) 957 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir); 958 } 959 960 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, 961 u32 te_trigger) 962 { 963 enum pipe pipe = INVALID_PIPE; 964 enum transcoder dsi_trans; 965 enum port port; 966 u32 val; 967 968 /* 969 * Incase of dual link, TE comes from DSI_1 970 * this is to check if dual link is enabled 971 */ 972 val = intel_uncore_read(&dev_priv->uncore, 973 TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0)); 974 val &= PORT_SYNC_MODE_ENABLE; 975 976 /* 977 * if dual link is enabled, then read DSI_0 978 * transcoder registers 979 */ 980 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ? 981 PORT_A : PORT_B; 982 dsi_trans = (port == PORT_A) ? 
TRANSCODER_DSI_0 : TRANSCODER_DSI_1; 983 984 /* Check if DSI configured in command mode */ 985 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans)); 986 val = val & OP_MODE_MASK; 987 988 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { 989 drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n"); 990 return; 991 } 992 993 /* Get PIPE for handling VBLANK event */ 994 val = intel_uncore_read(&dev_priv->uncore, 995 TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans)); 996 switch (val & TRANS_DDI_EDP_INPUT_MASK) { 997 case TRANS_DDI_EDP_INPUT_A_ON: 998 pipe = PIPE_A; 999 break; 1000 case TRANS_DDI_EDP_INPUT_B_ONOFF: 1001 pipe = PIPE_B; 1002 break; 1003 case TRANS_DDI_EDP_INPUT_C_ONOFF: 1004 pipe = PIPE_C; 1005 break; 1006 default: 1007 drm_err(&dev_priv->drm, "Invalid PIPE\n"); 1008 return; 1009 } 1010 1011 intel_handle_vblank(dev_priv, pipe); 1012 1013 /* clear TE in dsi IIR */ 1014 port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A; 1015 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); 1016 } 1017 1018 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915) 1019 { 1020 if (DISPLAY_VER(i915) >= 9) 1021 return GEN9_PIPE_PLANE1_FLIP_DONE; 1022 else 1023 return GEN8_PIPE_PRIMARY_FLIP_DONE; 1024 } 1025 1026 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv) 1027 { 1028 u32 mask = GEN8_PIPE_FIFO_UNDERRUN; 1029 1030 if (DISPLAY_VER(dev_priv) >= 13) 1031 mask |= XELPD_PIPE_SOFT_UNDERRUN | 1032 XELPD_PIPE_HARD_UNDERRUN; 1033 1034 return mask; 1035 } 1036 1037 static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir) 1038 { 1039 u32 pica_ier = 0; 1040 1041 *pica_iir = 0; 1042 *pch_iir = intel_de_read(i915, SDEIIR); 1043 if (!*pch_iir) 1044 return; 1045 1046 /** 1047 * PICA IER must be disabled/re-enabled around clearing PICA IIR and 1048 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set 1049 * their flags both in the PICA and SDE IIR. 
1050 */ 1051 if (*pch_iir & SDE_PICAINTERRUPT) { 1052 drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL); 1053 1054 pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0); 1055 *pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR); 1056 intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir); 1057 } 1058 1059 intel_de_write(i915, SDEIIR, *pch_iir); 1060 1061 if (pica_ier) 1062 intel_de_write(i915, PICAINTERRUPT_IER, pica_ier); 1063 } 1064 1065 void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 1066 { 1067 u32 iir; 1068 enum pipe pipe; 1069 1070 drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv)); 1071 1072 if (master_ctl & GEN8_DE_MISC_IRQ) { 1073 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR); 1074 if (iir) { 1075 intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir); 1076 gen8_de_misc_irq_handler(dev_priv, iir); 1077 } else { 1078 drm_err_ratelimited(&dev_priv->drm, 1079 "The master control interrupt lied (DE MISC)!\n"); 1080 } 1081 } 1082 1083 if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 1084 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR); 1085 if (iir) { 1086 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir); 1087 gen11_hpd_irq_handler(dev_priv, iir); 1088 } else { 1089 drm_err_ratelimited(&dev_priv->drm, 1090 "The master control interrupt lied, (DE HPD)!\n"); 1091 } 1092 } 1093 1094 if (master_ctl & GEN8_DE_PORT_IRQ) { 1095 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR); 1096 if (iir) { 1097 bool found = false; 1098 1099 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir); 1100 1101 if (iir & gen8_de_port_aux_mask(dev_priv)) { 1102 intel_dp_aux_irq_handler(dev_priv); 1103 found = true; 1104 } 1105 1106 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 1107 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; 1108 1109 if (hotplug_trigger) { 1110 bxt_hpd_irq_handler(dev_priv, hotplug_trigger); 1111 found = true; 1112 } 1113 } else if 
(IS_BROADWELL(dev_priv)) { 1114 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; 1115 1116 if (hotplug_trigger) { 1117 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 1118 found = true; 1119 } 1120 } 1121 1122 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 1123 (iir & BXT_DE_PORT_GMBUS)) { 1124 intel_gmbus_irq_handler(dev_priv); 1125 found = true; 1126 } 1127 1128 if (DISPLAY_VER(dev_priv) >= 11) { 1129 u32 te_trigger = iir & (DSI0_TE | DSI1_TE); 1130 1131 if (te_trigger) { 1132 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); 1133 found = true; 1134 } 1135 } 1136 1137 if (!found) 1138 drm_err_ratelimited(&dev_priv->drm, 1139 "Unexpected DE Port interrupt\n"); 1140 } else { 1141 drm_err_ratelimited(&dev_priv->drm, 1142 "The master control interrupt lied (DE PORT)!\n"); 1143 } 1144 } 1145 1146 for_each_pipe(dev_priv, pipe) { 1147 u32 fault_errors; 1148 1149 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 1150 continue; 1151 1152 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe)); 1153 if (!iir) { 1154 drm_err_ratelimited(&dev_priv->drm, 1155 "The master control interrupt lied (DE PIPE)!\n"); 1156 continue; 1157 } 1158 1159 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); 1160 1161 if (iir & GEN8_PIPE_VBLANK) 1162 intel_handle_vblank(dev_priv, pipe); 1163 1164 if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) 1165 flip_done_handler(dev_priv, pipe); 1166 1167 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 1168 hsw_pipe_crc_irq_handler(dev_priv, pipe); 1169 1170 if (iir & gen8_de_pipe_underrun_mask(dev_priv)) 1171 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1172 1173 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); 1174 if (fault_errors) 1175 drm_err_ratelimited(&dev_priv->drm, 1176 "Fault errors on pipe %c: 0x%08x\n", 1177 pipe_name(pipe), 1178 fault_errors); 1179 } 1180 1181 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 1182 master_ctl & GEN8_DE_PCH_IRQ) { 1183 u32 pica_iir; 1184 1185 /* 1186 * 
FIXME(BDW): Assume for now that the new interrupt handling 1187 * scheme also closed the SDE interrupt handling race we've seen 1188 * on older pch-split platforms. But this needs testing. 1189 */ 1190 gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir); 1191 if (iir) { 1192 if (pica_iir) 1193 xelpdp_pica_irq_handler(dev_priv, pica_iir); 1194 1195 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 1196 icp_irq_handler(dev_priv, iir); 1197 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 1198 spt_irq_handler(dev_priv, iir); 1199 else 1200 cpt_irq_handler(dev_priv, iir); 1201 } else { 1202 /* 1203 * Like on previous PCH there seems to be something 1204 * fishy going on with forwarding PCH interrupts. 1205 */ 1206 drm_dbg(&dev_priv->drm, 1207 "The master control interrupt lied (SDE)!\n"); 1208 } 1209 } 1210 } 1211 1212 u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) 1213 { 1214 void __iomem * const regs = intel_uncore_regs(&i915->uncore); 1215 u32 iir; 1216 1217 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 1218 return 0; 1219 1220 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 1221 if (likely(iir)) 1222 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 1223 1224 return iir; 1225 } 1226 1227 void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) 1228 { 1229 struct intel_display *display = &i915->display; 1230 1231 if (iir & GEN11_GU_MISC_GSE) 1232 intel_opregion_asle_intr(display); 1233 } 1234 1235 void gen11_display_irq_handler(struct drm_i915_private *i915) 1236 { 1237 void __iomem * const regs = intel_uncore_regs(&i915->uncore); 1238 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 1239 1240 disable_rpm_wakeref_asserts(&i915->runtime_pm); 1241 /* 1242 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 1243 * for the display related bits. 
1244 */ 1245 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); 1246 gen8_de_irq_handler(i915, disp_ctl); 1247 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 1248 GEN11_DISPLAY_IRQ_ENABLE); 1249 1250 enable_rpm_wakeref_asserts(&i915->runtime_pm); 1251 } 1252 1253 /* Called from drm generic code, passed 'crtc' which 1254 * we use as a pipe index 1255 */ 1256 int i8xx_enable_vblank(struct drm_crtc *crtc) 1257 { 1258 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1259 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1260 unsigned long irqflags; 1261 1262 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1263 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 1264 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1265 1266 return 0; 1267 } 1268 1269 int i915gm_enable_vblank(struct drm_crtc *crtc) 1270 { 1271 struct drm_i915_private *i915 = to_i915(crtc->dev); 1272 1273 /* 1274 * Vblank interrupts fail to wake the device up from C2+. 1275 * Disabling render clock gating during C-states avoids 1276 * the problem. There is a small power cost so we do this 1277 * only when vblank interrupts are actually enabled. 
1278 */ 1279 if (i915->display.irq.vblank_enabled++ == 0) 1280 intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 1281 1282 return i8xx_enable_vblank(crtc); 1283 } 1284 1285 int i965_enable_vblank(struct drm_crtc *crtc) 1286 { 1287 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1288 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1289 unsigned long irqflags; 1290 1291 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1292 i915_enable_pipestat(dev_priv, pipe, 1293 PIPE_START_VBLANK_INTERRUPT_STATUS); 1294 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1295 1296 return 0; 1297 } 1298 1299 int ilk_enable_vblank(struct drm_crtc *crtc) 1300 { 1301 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1302 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1303 unsigned long irqflags; 1304 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 1305 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 1306 1307 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1308 ilk_enable_display_irq(dev_priv, bit); 1309 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1310 1311 /* Even though there is no DMC, frame counter can get stuck when 1312 * PSR is active as no frames are generated. 1313 */ 1314 if (HAS_PSR(dev_priv)) 1315 drm_crtc_vblank_restore(crtc); 1316 1317 return 0; 1318 } 1319 1320 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, 1321 bool enable) 1322 { 1323 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 1324 enum port port; 1325 1326 if (!(intel_crtc->mode_flags & 1327 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) 1328 return false; 1329 1330 /* for dual link cases we consider TE from slave */ 1331 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) 1332 port = PORT_B; 1333 else 1334 port = PORT_A; 1335 1336 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, 1337 enable ? 
0 : DSI_TE_EVENT); 1338 1339 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); 1340 1341 return true; 1342 } 1343 1344 int bdw_enable_vblank(struct drm_crtc *_crtc) 1345 { 1346 struct intel_crtc *crtc = to_intel_crtc(_crtc); 1347 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1348 enum pipe pipe = crtc->pipe; 1349 unsigned long irqflags; 1350 1351 if (gen11_dsi_configure_te(crtc, true)) 1352 return 0; 1353 1354 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1355 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 1356 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1357 1358 /* Even if there is no DMC, frame counter can get stuck when 1359 * PSR is active as no frames are generated, so check only for PSR. 1360 */ 1361 if (HAS_PSR(dev_priv)) 1362 drm_crtc_vblank_restore(&crtc->base); 1363 1364 return 0; 1365 } 1366 1367 /* Called from drm generic code, passed 'crtc' which 1368 * we use as a pipe index 1369 */ 1370 void i8xx_disable_vblank(struct drm_crtc *crtc) 1371 { 1372 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1373 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1374 unsigned long irqflags; 1375 1376 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1377 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 1378 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1379 } 1380 1381 void i915gm_disable_vblank(struct drm_crtc *crtc) 1382 { 1383 struct drm_i915_private *i915 = to_i915(crtc->dev); 1384 1385 i8xx_disable_vblank(crtc); 1386 1387 if (--i915->display.irq.vblank_enabled == 0) 1388 intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 1389 } 1390 1391 void i965_disable_vblank(struct drm_crtc *crtc) 1392 { 1393 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1394 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1395 unsigned long irqflags; 1396 1397 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1398 i915_disable_pipestat(dev_priv, pipe, 
1399 PIPE_START_VBLANK_INTERRUPT_STATUS); 1400 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1401 } 1402 1403 void ilk_disable_vblank(struct drm_crtc *crtc) 1404 { 1405 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1406 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1407 unsigned long irqflags; 1408 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 1409 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 1410 1411 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1412 ilk_disable_display_irq(dev_priv, bit); 1413 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1414 } 1415 1416 void bdw_disable_vblank(struct drm_crtc *_crtc) 1417 { 1418 struct intel_crtc *crtc = to_intel_crtc(_crtc); 1419 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1420 enum pipe pipe = crtc->pipe; 1421 unsigned long irqflags; 1422 1423 if (gen11_dsi_configure_te(crtc, false)) 1424 return; 1425 1426 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1427 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 1428 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1429 } 1430 1431 void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 1432 { 1433 struct intel_uncore *uncore = &dev_priv->uncore; 1434 1435 if (IS_CHERRYVIEW(dev_priv)) 1436 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 1437 else 1438 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); 1439 1440 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 1441 intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0); 1442 1443 i9xx_pipestat_irq_reset(dev_priv); 1444 1445 GEN3_IRQ_RESET(uncore, VLV_); 1446 dev_priv->irq_mask = ~0u; 1447 } 1448 1449 void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 1450 { 1451 struct intel_uncore *uncore = &dev_priv->uncore; 1452 1453 u32 pipestat_mask; 1454 u32 enable_mask; 1455 enum pipe pipe; 1456 1457 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 1458 1459 i915_enable_pipestat(dev_priv, PIPE_A, 
PIPE_GMBUS_INTERRUPT_STATUS); 1460 for_each_pipe(dev_priv, pipe) 1461 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 1462 1463 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 1464 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 1465 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 1466 I915_LPE_PIPE_A_INTERRUPT | 1467 I915_LPE_PIPE_B_INTERRUPT; 1468 1469 if (IS_CHERRYVIEW(dev_priv)) 1470 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 1471 I915_LPE_PIPE_C_INTERRUPT; 1472 1473 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); 1474 1475 dev_priv->irq_mask = ~enable_mask; 1476 1477 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 1478 } 1479 1480 void gen8_display_irq_reset(struct drm_i915_private *dev_priv) 1481 { 1482 struct intel_uncore *uncore = &dev_priv->uncore; 1483 enum pipe pipe; 1484 1485 if (!HAS_DISPLAY(dev_priv)) 1486 return; 1487 1488 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 1489 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 1490 1491 for_each_pipe(dev_priv, pipe) 1492 if (intel_display_power_is_enabled(dev_priv, 1493 POWER_DOMAIN_PIPE(pipe))) 1494 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 1495 1496 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 1497 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 1498 } 1499 1500 void gen11_display_irq_reset(struct drm_i915_private *dev_priv) 1501 { 1502 struct intel_uncore *uncore = &dev_priv->uncore; 1503 enum pipe pipe; 1504 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 1505 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 1506 1507 if (!HAS_DISPLAY(dev_priv)) 1508 return; 1509 1510 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); 1511 1512 if (DISPLAY_VER(dev_priv) >= 12) { 1513 enum transcoder trans; 1514 1515 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 1516 enum intel_display_power_domain domain; 1517 1518 domain = POWER_DOMAIN_TRANSCODER(trans); 1519 if (!intel_display_power_is_enabled(dev_priv, domain)) 1520 continue; 1521 1522 intel_uncore_write(uncore, 1523 
TRANS_PSR_IMR(dev_priv, trans), 1524 0xffffffff); 1525 intel_uncore_write(uncore, 1526 TRANS_PSR_IIR(dev_priv, trans), 1527 0xffffffff); 1528 } 1529 } else { 1530 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 1531 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 1532 } 1533 1534 for_each_pipe(dev_priv, pipe) 1535 if (intel_display_power_is_enabled(dev_priv, 1536 POWER_DOMAIN_PIPE(pipe))) 1537 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 1538 1539 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 1540 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 1541 1542 if (DISPLAY_VER(dev_priv) >= 14) 1543 GEN3_IRQ_RESET(uncore, PICAINTERRUPT_); 1544 else 1545 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 1546 1547 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 1548 GEN3_IRQ_RESET(uncore, SDE); 1549 } 1550 1551 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 1552 u8 pipe_mask) 1553 { 1554 struct intel_uncore *uncore = &dev_priv->uncore; 1555 u32 extra_ier = GEN8_PIPE_VBLANK | 1556 gen8_de_pipe_underrun_mask(dev_priv) | 1557 gen8_de_pipe_flip_done_mask(dev_priv); 1558 enum pipe pipe; 1559 1560 spin_lock_irq(&dev_priv->irq_lock); 1561 1562 if (!intel_irqs_enabled(dev_priv)) { 1563 spin_unlock_irq(&dev_priv->irq_lock); 1564 return; 1565 } 1566 1567 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 1568 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 1569 dev_priv->display.irq.de_irq_mask[pipe], 1570 ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier); 1571 1572 spin_unlock_irq(&dev_priv->irq_lock); 1573 } 1574 1575 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 1576 u8 pipe_mask) 1577 { 1578 struct intel_uncore *uncore = &dev_priv->uncore; 1579 enum pipe pipe; 1580 1581 spin_lock_irq(&dev_priv->irq_lock); 1582 1583 if (!intel_irqs_enabled(dev_priv)) { 1584 spin_unlock_irq(&dev_priv->irq_lock); 1585 return; 1586 } 1587 1588 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 1589 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 1590 1591 
spin_unlock_irq(&dev_priv->irq_lock); 1592 1593 /* make sure we're done processing display irqs */ 1594 intel_synchronize_irq(dev_priv); 1595 } 1596 1597 /* 1598 * SDEIER is also touched by the interrupt handler to work around missed PCH 1599 * interrupts. Hence we can't update it after the interrupt handler is enabled - 1600 * instead we unconditionally enable all PCH interrupt sources here, but then 1601 * only unmask them as needed with SDEIMR. 1602 * 1603 * Note that we currently do this after installing the interrupt handler, 1604 * but before we enable the master interrupt. That should be sufficient 1605 * to avoid races with the irq handler, assuming we have MSI. Shared legacy 1606 * interrupts could still race. 1607 */ 1608 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) 1609 { 1610 struct intel_uncore *uncore = &dev_priv->uncore; 1611 u32 mask; 1612 1613 if (HAS_PCH_NOP(dev_priv)) 1614 return; 1615 1616 if (HAS_PCH_IBX(dev_priv)) 1617 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 1618 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 1619 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 1620 else 1621 mask = SDE_GMBUS_CPT; 1622 1623 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); 1624 } 1625 1626 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 1627 { 1628 lockdep_assert_held(&dev_priv->irq_lock); 1629 1630 if (dev_priv->display.irq.display_irqs_enabled) 1631 return; 1632 1633 dev_priv->display.irq.display_irqs_enabled = true; 1634 1635 if (intel_irqs_enabled(dev_priv)) { 1636 vlv_display_irq_reset(dev_priv); 1637 vlv_display_irq_postinstall(dev_priv); 1638 } 1639 } 1640 1641 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 1642 { 1643 lockdep_assert_held(&dev_priv->irq_lock); 1644 1645 if (!dev_priv->display.irq.display_irqs_enabled) 1646 return; 1647 1648 dev_priv->display.irq.display_irqs_enabled = false; 1649 1650 if (intel_irqs_enabled(dev_priv)) 1651 vlv_display_irq_reset(dev_priv); 1652 } 

/*
 * Program DEIMR/DEIER (and the PCH SDE registers via ibx_irq_postinstall())
 * for ILK through HSW display interrupts.
 */
void ilk_de_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 display_mask, extra_mask;

	/* IVB/HSW use the _IVB bit layout; ILK/SNB the original one. */
	if (DISPLAY_VER(i915) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(i915)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(i915))
		extra_mask |= DE_PCU_EVENT;

	i915->irq_mask = ~display_mask;

	ibx_irq_postinstall(i915);

	/* display_mask is unmasked in IMR; extra_mask only enabled in IER. */
	GEN3_IRQ_INIT(uncore, DE, i915->irq_mask,
		      display_mask | extra_mask);
}

static void mtp_irq_postinstall(struct drm_i915_private *i915);
static void icp_irq_postinstall(struct drm_i915_private *i915);

/*
 * Program the BDW+ display engine interrupt registers: PCH/PICA first, then
 * DE MISC/PORT/HPD and the per-pipe IMR/IER pairs.
 */
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* South display (PCH/PICA) interrupts are set up first. */
	if (DISPLAY_VER(dev_priv) >= 14)
		mtp_irq_postinstall(dev_priv);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	if (DISPLAY_VER(dev_priv) < 11)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 14) {
		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
				  XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		/* Only unmask DSI TE events when VBT reports a DSI panel. */
		if (intel_bios_is_dsi_present(display, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		/* Only check PSR IIRs of transcoders whose power well is on. */
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore,
						TRANS_PSR_IIR(dev_priv, trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;

		/* Powered-off pipes get their IMR/IER on power well enable. */
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->display.irq.de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
			GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}

/* MTL+ south display: PICA hotplug/AUX interrupts plus the SDE registers. */
static void mtp_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
			     XELPDP_TBT_HOTPLUG_MASK;

	GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask,
		      de_hpd_enables);

	GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
}

/* ICP+ south display: enable all SDE sources, unmask only GMBUS. */
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

void dg1_de_irq_postinstall(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	gen8_de_irq_postinstall(i915);
	intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

/* One-time init of display interrupt state; called during driver load. */
void intel_display_irq_init(struct drm_i915_private *i915)
{
	i915->drm.vblank_disable_immediate = true;

	/*
	 * Most platforms treat the display irq block as an always-on power
	 * domain. vlv/chv can disable it at runtime and need special care to
	 * avoid writing any of the display block registers outside of the power
	 * domain. We defer setting up the display irqs in this case to the
	 * runtime pm.
	 */
	i915->display.irq.display_irqs_enabled = true;
	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		i915->display.irq.display_irqs_enabled = false;

	intel_hotplug_irq_init(i915);
}