// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_vblank.h>

#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_pipe_crc_regs.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"

/* Forward a pipe's vblank interrupt to the drm vblank machinery. */
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	/* IMR semantics: a set bit masks the interrupt, so enabled bits are cleared. */
	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Only touch the hardware when the cached mask actually changes. */
	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_port_irq(struct drm_i915_private *dev_priv,
			 u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	/* No cached copy for this register; read-modify-write the hardware. */
	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	/* Per-pipe IMR is shadowed in de_irq_mask[]; update cache then hardware. */
	new_val = dev_priv->display.irq.de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
		dev_priv->display.irq.de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
				   dev_priv->display.irq.de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}

/*
 * Compute the PIPESTAT enable bits (high 16) corresponding to the currently
 * requested status bits (low 16) for @pipe, filtering out bits that must not
 * be enabled on this platform.
 */
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* These enable bits don't line up with status << 16; handle them apart. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

/* Enable the given PIPESTAT status interrupts on @pipe. */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(dev_priv, pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* Already enabled? Nothing to do. */
	if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	/* Status bits are write-1-to-clear, so also ack any stale status. */
	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

/* Disable the given PIPESTAT status interrupts on @pipe. */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(dev_priv, pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm,
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* Already disabled? Nothing to do. */
	if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	/* Status bits are write-1-to-clear, so also ack the disabled status. */
	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

/* Platforms that deliver the legacy backlight (BLC) event via PIPESTAT. */
static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (IS_I85X(i915))
		return true;

	if (IS_PINEVIEW(i915))
		return true;

	return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	if (!intel_opregion_asle_present(display))
		return;

	if (!i915_has_legacy_blc_interrupt(display))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

#if IS_ENABLED(CONFIG_DEBUG_FS)
/* Push one set of CRC results for @pipe into the drm CRC debugfs machinery. */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

/* Deliver the pending pageflip completion event for @pipe, if any. */
static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_display *display = &i915->display;
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	spin_lock(&i915->drm.event_lock);

	if (crtc->flip_done_event) {
		trace_intel_crtc_flip_done(crtc);
		drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
		crtc->flip_done_event = NULL;
	}

	spin_unlock(&i915->drm.event_lock);
}

/* HSW+: a single CRC result register per pipe. */
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_HSW(pipe)),
				     0, 0, 0, 0);
}

/* IVB: five CRC result registers per pipe. */
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

/* Pre-IVB: R/G/B CRC registers plus platform-dependent RES1/RES2. */
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore,
					 PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore,
					 PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(dev_priv, pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
				     res1, res2);
}

/* Ack all PIPESTAT status bits and clear the cached per-pipe irq masks. */
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore,
				   PIPESTAT(dev_priv, pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
	}
}

/* Read and ack the PIPESTAT status bits we care about into @pipe_stats. */
void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
			   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    !dev_priv->display.irq.vlv_display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(dev_priv, pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

/* Dispatch already-acked PIPESTAT events (gen2/3-style, frame-count vblank). */
void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	struct intel_display *display = &dev_priv->display;
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(display);
}

/* Dispatch already-acked PIPESTAT events (i965-style, start-of-vblank). */
void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	struct intel_display *display = &dev_priv->display;
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(display);

	/* GMBUS status is reported on pipe A's PIPESTAT. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(display);
}

/* Dispatch already-acked PIPESTAT events for VLV/CHV. */
void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				     u32 pipe_stats[I915_MAX_PIPES])
{ 553 struct intel_display *display = &dev_priv->display; 554 enum pipe pipe; 555 556 for_each_pipe(dev_priv, pipe) { 557 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 558 intel_handle_vblank(dev_priv, pipe); 559 560 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 561 flip_done_handler(dev_priv, pipe); 562 563 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 564 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 565 566 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 567 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 568 } 569 570 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 571 intel_gmbus_irq_handler(display); 572 } 573 574 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 575 { 576 struct intel_display *display = &dev_priv->display; 577 enum pipe pipe; 578 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 579 580 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 581 582 if (pch_iir & SDE_AUDIO_POWER_MASK) { 583 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 584 SDE_AUDIO_POWER_SHIFT); 585 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", 586 port_name(port)); 587 } 588 589 if (pch_iir & SDE_AUX_MASK) 590 intel_dp_aux_irq_handler(display); 591 592 if (pch_iir & SDE_GMBUS) 593 intel_gmbus_irq_handler(display); 594 595 if (pch_iir & SDE_AUDIO_HDCP_MASK) 596 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); 597 598 if (pch_iir & SDE_AUDIO_TRANS_MASK) 599 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); 600 601 if (pch_iir & SDE_POISON) 602 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 603 604 if (pch_iir & SDE_FDI_MASK) { 605 for_each_pipe(dev_priv, pipe) 606 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 607 pipe_name(pipe), 608 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 609 } 610 611 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 612 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); 613 614 if (pch_iir & (SDE_TRANSB_CRC_ERR | 
SDE_TRANSA_CRC_ERR)) 615 drm_dbg(&dev_priv->drm, 616 "PCH transcoder CRC error interrupt\n"); 617 618 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 619 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 620 621 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 622 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 623 } 624 625 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 626 { 627 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); 628 enum pipe pipe; 629 630 if (err_int & ERR_INT_POISON) 631 drm_err(&dev_priv->drm, "Poison interrupt\n"); 632 633 for_each_pipe(dev_priv, pipe) { 634 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 635 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 636 637 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 638 if (IS_IVYBRIDGE(dev_priv)) 639 ivb_pipe_crc_irq_handler(dev_priv, pipe); 640 else 641 hsw_pipe_crc_irq_handler(dev_priv, pipe); 642 } 643 } 644 645 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); 646 } 647 648 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 649 { 650 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); 651 enum pipe pipe; 652 653 if (serr_int & SERR_INT_POISON) 654 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 655 656 for_each_pipe(dev_priv, pipe) 657 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 658 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 659 660 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); 661 } 662 663 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 664 { 665 struct intel_display *display = &dev_priv->display; 666 enum pipe pipe; 667 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 668 669 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 670 671 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 672 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 673 SDE_AUDIO_POWER_SHIFT_CPT); 674 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", 675 port_name(port)); 676 } 677 678 if 
(pch_iir & SDE_AUX_MASK_CPT) 679 intel_dp_aux_irq_handler(display); 680 681 if (pch_iir & SDE_GMBUS_CPT) 682 intel_gmbus_irq_handler(display); 683 684 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 685 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); 686 687 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 688 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); 689 690 if (pch_iir & SDE_FDI_MASK_CPT) { 691 for_each_pipe(dev_priv, pipe) 692 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 693 pipe_name(pipe), 694 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 695 } 696 697 if (pch_iir & SDE_ERROR_CPT) 698 cpt_serr_int_handler(dev_priv); 699 } 700 701 void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) 702 { 703 struct intel_display *display = &dev_priv->display; 704 enum pipe pipe; 705 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 706 707 if (hotplug_trigger) 708 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 709 710 if (de_iir & DE_AUX_CHANNEL_A) 711 intel_dp_aux_irq_handler(display); 712 713 if (de_iir & DE_GSE) 714 intel_opregion_asle_intr(display); 715 716 if (de_iir & DE_POISON) 717 drm_err(&dev_priv->drm, "Poison interrupt\n"); 718 719 for_each_pipe(dev_priv, pipe) { 720 if (de_iir & DE_PIPE_VBLANK(pipe)) 721 intel_handle_vblank(dev_priv, pipe); 722 723 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 724 flip_done_handler(dev_priv, pipe); 725 726 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 727 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 728 729 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 730 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 731 } 732 733 /* check event from PCH */ 734 if (de_iir & DE_PCH_EVENT) { 735 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 736 737 if (HAS_PCH_CPT(dev_priv)) 738 cpt_irq_handler(dev_priv, pch_iir); 739 else 740 ibx_irq_handler(dev_priv, pch_iir); 741 742 /* should clear PCH hotplug event before clear CPU irq */ 743 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); 744 } 745 746 if 
(DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}

/* Top-level DE interrupt dispatch for IVB/HSW. */
void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
	struct intel_display *display = &dev_priv->display;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		struct intel_encoder *encoder;

		/* Only one eDP PSR instance on these platforms; handle and stop. */
		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
			u32 psr_iir;

			/* rmw with 0/0 reads and acks EDP_PSR_IIR in one go. */
			psr_iir = intel_uncore_rmw(&dev_priv->uncore,
						   EDP_PSR_IIR, 0, 0);
			intel_psr_irq_handler(intel_dp, psr_iir);
			break;
		}
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		intel_dp_aux_irq_handler(display);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(display);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}
}

/* AUX channel bits valid in GEN8_DE_PORT_IIR for this display version. */
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (DISPLAY_VER(dev_priv) >= 20)
		return 0;
	else if (DISPLAY_VER(dev_priv) >= 14)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB;
	else if (DISPLAY_VER(dev_priv) >= 13)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			XELPD_DE_PORT_AUX_DDID |
			XELPD_DE_PORT_AUX_DDIE |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4;
	else if (DISPLAY_VER(dev_priv) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(dev_priv) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}

/* Pipe fault bits valid in GEN8_DE_PIPE_IIR for this display version. */
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	if (DISPLAY_VER(display) >= 14)
		return MTL_PIPEDMC_ATS_FAULT |
			MTL_PLANE_ATS_FAULT |
			GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 12)
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 11)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else
		return GEN8_PIPE_CURSOR_FAULT |
			GEN8_PIPE_SPRITE_FAULT |
			GEN8_PIPE_PRIMARY_FAULT;
}

/* Wake any waiters blocked on a Punit PM demand response. */
static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.pmdemand.waitqueue);
}

/* Handle GEN8_DE_MISC_IIR events: DBuf overlap, pmdemand, RM timeout, GSE, PSR. */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	struct intel_display *display = &dev_priv->display;
	bool found = false;

	if (HAS_DBUF_OVERLAP_DETECTION(display)) {
		if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) {
			drm_warn(display->drm, "DBuf overlap detected\n");
			found = true;
		}
	}

	if (DISPLAY_VER(dev_priv) >= 14) {
		if (iir & (XELPDP_PMDEMAND_RSP |
			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
				drm_dbg(&dev_priv->drm,
					"Error waiting for Punit PM Demand Response\n");

			intel_pmdemand_irq_handler(dev_priv);
			found = true;
		}

		if (iir & XELPDP_RM_TIMEOUT) {
			u32 val = intel_uncore_read(&dev_priv->uncore,
						    RM_TIMEOUT_REG_CAPTURE);
			drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
			found = true;
		}
	} else if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(display);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			/* GEN12+ has a PSR IIR per transcoder; older has one. */
			if (DISPLAY_VER(dev_priv) >= 12)
				iir_reg = TRANS_PSR_IIR(dev_priv,
							intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			/* rmw with 0/0 reads and acks the PSR IIR in one go. */
			psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior GEN12 only have one EDP PSR */
			if (DISPLAY_VER(dev_priv) < 12)
				break;
		}
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}

/* Handle a DSI TE (tearing effect) event and forward it as a vblank. */
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val;

	/*
	 * Incase of dual link, TE comes from DSI_1
	 * this is to check if dual link is enabled
	 */
	val = intel_uncore_read(&dev_priv->uncore,
				TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * if dual link is enabled, then read DSI_0
	 * transcoder registers
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
	       PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_uncore_read(&dev_priv->uncore,
				TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(&dev_priv->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(dev_priv, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ?
 PORT_B : PORT_A;
	/* rmw with 0/0 reads and acks the DSI IIR in one go. */
	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
}

/* Flip-done bit for the primary plane (plane 1) on this display version. */
static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

/* Read and ack SDEIIR (and, on MTL+, the PICA IIR behind it). */
static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
{
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(i915, SDEIIR);
	if (!*pch_iir)
		return;

	/**
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);

		pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
		intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(i915, SDEIIR, *pch_iir);

	if (pica_ier)
		intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
}

/* Top-level display engine interrupt dispatch for BDW+. */
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	struct intel_display *display = &dev_priv->display;
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			/* Ack first, then dispatch to the individual handlers. */
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				intel_dp_aux_irq_handler(display);
				found = true;
			}

			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(display);
				found = true;
			}

			if (DISPLAY_VER(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err_ratelimited(&dev_priv->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
			flip_done_handler(dev_priv, pipe);

		if (HAS_DSB(dev_priv)) {
			if (iir & GEN12_DSB_INT(INTEL_DSB_0))
				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);

			if (iir & GEN12_DSB_INT(INTEL_DSB_1))
				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);

			if (iir & GEN12_DSB_INT(INTEL_DSB_2))
				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
		}

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err_ratelimited(&dev_priv->drm,
					    "Fault errors on pipe %c: 0x%08x\n",
					    pipe_name(pipe),
					    fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		u32 pica_iir;

		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
		if (iir) {
			if (pica_iir)
				xelpdp_pica_irq_handler(dev_priv, pica_iir);

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}
}

/*
 * Read and ack GEN11_GU_MISC_IIR. Returns the bits that were set (0 if the
 * GU MISC bit wasn't set in master_ctl), for the caller to pass on to
 * gen11_gu_misc_irq_handler().
 */
u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = intel_de_read(i915, GEN11_GU_MISC_IIR);
	if (likely(iir))
		intel_de_write(i915, GEN11_GU_MISC_IIR, iir);

	return iir;
}

/* Handle previously acked GU MISC bits; GSE triggers the opregion ASLE handler. */
void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
{
	struct intel_display *display = &i915->display;

	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(display);
}

void gen11_display_irq_handler(struct drm_i915_private *i915)
{
	u32 disp_ctl;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	disp_ctl = intel_de_read(i915, GEN11_DISPLAY_INT_CTL);

	/* Disable the display master irq while processing, re-enable after. */
	intel_de_write(i915, GEN11_DISPLAY_INT_CTL, 0);
	gen8_de_irq_handler(i915, disp_ctl);
	intel_de_write(i915, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.vblank_time_lock);

	/*
	 * Vblank/CRC interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank/CRC interrupts are actually enabled.
	 */
	if (i915->display.irq.vblank_enabled++ == 0)
		intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.vblank_time_lock);

	/* Re-enable render clock gating once the last vblank user is gone. */
	if (--i915->display.irq.vblank_enabled == 0)
		intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

/*
 * Apply/remove the i915gm C-state workaround, taking vblank_time_lock
 * itself (the _enable/_disable helpers above expect it already held).
 */
void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
{
	spin_lock_irq(&i915->drm.vblank_time_lock);

	if (enable)
		i915gm_irq_cstate_wa_enable(i915);
	else
		i915gm_irq_cstate_wa_disable(i915);

	spin_unlock_irq(&i915->drm.vblank_time_lock);
}

int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/*
 * NOTE(review): the cstate wa helper asserts vblank_time_lock is held;
 * presumably the drm vblank core holds it around these hooks — confirm.
 */
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->dev);

	i915gm_irq_cstate_wa_enable(i915);

	return i8xx_enable_vblank(crtc);
}

void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	i915gm_irq_cstate_wa_disable(i915);
}

int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/*
 * Enable/disable the DSI TE event as the vblank source for a command-mode
 * DSI crtc. Returns false when the crtc doesn't use DSI TE signalling, so
 * the caller falls back to the regular pipe vblank interrupt.
 */
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum port port;

	if (!(intel_crtc->mode_flags &
	      (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
			 enable ? 0 : DSI_TE_EVENT);

	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);

	return true;
}

static void intel_display_vblank_dc_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), irq.vblank_dc_work);
	int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);

	/*
	 * NOTE: intel_display_power_set_target_dc_state is used only by PSR
	 * code for DC3CO handling. DC3CO target state is currently disabled in
	 * PSR code. If DC3CO is taken into use we need take that into account
	 * here as well.
	 */
	intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ?
						DC_STATE_DISABLE :
						DC_STATE_EN_UPTO_DC6);
}

int bdw_enable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_display *display = to_intel_display(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	/* Command-mode DSI uses the TE event instead of the pipe vblank irq. */
	if (gen11_dsi_configure_te(crtc, true))
		return 0;

	/* First pipe needing the workaround kicks off the DC-state update. */
	if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
		schedule_work(&display->irq.vblank_dc_work);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(&crtc->base);

	return 0;
}

void bdw_disable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_display *display = to_intel_display(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, false))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
		schedule_work(&display->irq.vblank_dc_work);
}

/* Full VLV/CHV display irq reset; leaves dev_priv->irq_mask at ~0u (all masked). */
static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	gen2_irq_reset(uncore, VLV_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	if (dev_priv->display.irq.vlv_display_irqs_enabled)
		_vlv_display_irq_reset(dev_priv);
}

void i9xx_display_irq_reset(struct drm_i915_private *i915)
{
	if (I915_HAS_HOTPLUG(i915)) {
		i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
		intel_uncore_rmw(&i915->uncore,
				 PORT_HOTPLUG_STAT(i915), 0, 0);
	}

	i9xx_pipestat_irq_reset(i915);
}

void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	if (!dev_priv->display.irq.vlv_display_irqs_enabled)
		return;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* GMBUS events are reported via pipe A's pipestat register. */
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* Reset must have left everything masked. */
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	gen2_irq_init(uncore, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}

void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	/* Only touch pipe registers whose power well is on. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));

	gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
}

void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		/* Per-transcoder PSR irq registers on gen12+. */
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore,
					   TRANS_PSR_IMR(dev_priv, trans),
					   0xffffffff);
			intel_uncore_write(uncore,
					   TRANS_PSR_IIR(dev_priv, trans),
					   0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));

	gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);

	if (DISPLAY_VER(dev_priv) >= 14)
		gen2_irq_reset(uncore, PICAINTERRUPT_IRQ_REGS);
	else
		gen2_irq_reset(uncore, GEN11_DE_HPD_IRQ_REGS);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		gen2_irq_reset(uncore, SDE_IRQ_REGS);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(dev_priv);
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
			      dev_priv->display.irq.de_irq_mask[pipe],
			      ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display.irq.vlv_display_irqs_enabled)
		return;

	dev_priv->display.irq.vlv_display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		_vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display.irq.vlv_display_irqs_enabled)
		return;

	dev_priv->display.irq.vlv_display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		_vlv_display_irq_reset(dev_priv);
}

void ilk_de_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 display_mask, extra_mask;

	if (DISPLAY_VER(i915) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(i915)) {
		gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(i915))
		extra_mask |= DE_PCU_EVENT;

	i915->irq_mask = ~display_mask;

	ibx_irq_postinstall(i915);

	gen2_irq_init(uncore, DE_IRQ_REGS, i915->irq_mask,
		      display_mask | extra_mask);
}

static void mtp_irq_postinstall(struct drm_i915_private *i915);
static void icp_irq_postinstall(struct drm_i915_private *i915);

/* Program PCH, pipe, port, misc and HPD interrupt registers for gen8+. */
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (DISPLAY_VER(dev_priv) >= 14)
		mtp_irq_postinstall(dev_priv);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	if (DISPLAY_VER(dev_priv) < 11)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 14) {
		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
				  XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(display, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	if (HAS_DBUF_OVERLAP_DETECTION(display))
		de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;

	if (HAS_DSB(dev_priv))
		de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
			GEN12_DSB_INT(INTEL_DSB_1) |
			GEN12_DSB_INT(INTEL_DSB_2);

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen2_assert_iir_is_zero(uncore,
						TRANS_PSR_IIR(dev_priv, trans));
		}
	} else {
		gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
				      dev_priv->display.irq.de_irq_mask[pipe],
				      de_pipe_enables);
	}

	gen2_irq_init(uncore, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked, de_port_enables);
	gen2_irq_init(uncore, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked);

	if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		gen2_irq_init(uncore, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
			      de_hpd_enables);
	}
}

static void mtp_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
			     XELPDP_TBT_HOTPLUG_MASK;

	gen2_irq_init(uncore, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
		      de_hpd_enables);

	gen2_irq_init(uncore, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
}

void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

void dg1_de_irq_postinstall(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	gen8_de_irq_postinstall(i915);
	intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

/*
 * One-time display irq init: immediate vblank disable, hotplug irq state,
 * and the worker that updates the DC state for the vblank workaround.
 */
void intel_display_irq_init(struct drm_i915_private *i915)
{
	i915->drm.vblank_disable_immediate = true;

	intel_hotplug_irq_init(i915);

	INIT_WORK(&i915->display.irq.vblank_dc_work,
		  intel_display_vblank_dc_work);
}