1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2023 Intel Corporation 4 */ 5 6 #include <drm/drm_vblank.h> 7 8 #include "gt/intel_rps.h" 9 #include "i915_drv.h" 10 #include "i915_irq.h" 11 #include "i915_reg.h" 12 #include "icl_dsi_regs.h" 13 #include "intel_atomic_plane.h" 14 #include "intel_crtc.h" 15 #include "intel_de.h" 16 #include "intel_display_irq.h" 17 #include "intel_display_rpm.h" 18 #include "intel_display_trace.h" 19 #include "intel_display_types.h" 20 #include "intel_dmc_wl.h" 21 #include "intel_dp_aux.h" 22 #include "intel_dsb.h" 23 #include "intel_fdi_regs.h" 24 #include "intel_fifo_underrun.h" 25 #include "intel_gmbus.h" 26 #include "intel_hotplug_irq.h" 27 #include "intel_pipe_crc_regs.h" 28 #include "intel_pmdemand.h" 29 #include "intel_psr.h" 30 #include "intel_psr_regs.h" 31 #include "intel_uncore.h" 32 33 static void 34 intel_display_irq_regs_init(struct intel_display *display, struct i915_irq_regs regs, 35 u32 imr_val, u32 ier_val) 36 { 37 intel_dmc_wl_get(display, regs.imr); 38 intel_dmc_wl_get(display, regs.ier); 39 intel_dmc_wl_get(display, regs.iir); 40 41 gen2_irq_init(to_intel_uncore(display->drm), regs, imr_val, ier_val); 42 43 intel_dmc_wl_put(display, regs.iir); 44 intel_dmc_wl_put(display, regs.ier); 45 intel_dmc_wl_put(display, regs.imr); 46 } 47 48 static void 49 intel_display_irq_regs_reset(struct intel_display *display, struct i915_irq_regs regs) 50 { 51 intel_dmc_wl_get(display, regs.imr); 52 intel_dmc_wl_get(display, regs.ier); 53 intel_dmc_wl_get(display, regs.iir); 54 55 gen2_irq_reset(to_intel_uncore(display->drm), regs); 56 57 intel_dmc_wl_put(display, regs.iir); 58 intel_dmc_wl_put(display, regs.ier); 59 intel_dmc_wl_put(display, regs.imr); 60 } 61 62 static void 63 intel_display_irq_regs_assert_irr_is_zero(struct intel_display *display, i915_reg_t reg) 64 { 65 intel_dmc_wl_get(display, reg); 66 67 gen2_assert_iir_is_zero(to_intel_uncore(display->drm), reg); 68 69 intel_dmc_wl_put(display, reg); 70 } 71 72 struct 
pipe_fault_handler { 73 bool (*handle)(struct intel_crtc *crtc, enum plane_id plane_id); 74 u32 fault; 75 enum plane_id plane_id; 76 }; 77 78 static bool handle_plane_fault(struct intel_crtc *crtc, enum plane_id plane_id) 79 { 80 struct intel_display *display = to_intel_display(crtc); 81 struct intel_plane_error error = {}; 82 struct intel_plane *plane; 83 84 plane = intel_crtc_get_plane(crtc, plane_id); 85 if (!plane || !plane->capture_error) 86 return false; 87 88 plane->capture_error(crtc, plane, &error); 89 90 drm_err_ratelimited(display->drm, 91 "[CRTC:%d:%s][PLANE:%d:%s] fault (CTL=0x%x, SURF=0x%x, SURFLIVE=0x%x)\n", 92 crtc->base.base.id, crtc->base.name, 93 plane->base.base.id, plane->base.name, 94 error.ctl, error.surf, error.surflive); 95 96 return true; 97 } 98 99 static void intel_pipe_fault_irq_handler(struct intel_display *display, 100 const struct pipe_fault_handler *handlers, 101 enum pipe pipe, u32 fault_errors) 102 { 103 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 104 const struct pipe_fault_handler *handler; 105 106 for (handler = handlers; handler && handler->fault; handler++) { 107 if ((fault_errors & handler->fault) == 0) 108 continue; 109 110 if (handler->handle(crtc, handler->plane_id)) 111 fault_errors &= ~handler->fault; 112 } 113 114 WARN_ONCE(fault_errors, "[CRTC:%d:%s] unreported faults 0x%x\n", 115 crtc->base.base.id, crtc->base.name, fault_errors); 116 } 117 118 static void 119 intel_handle_vblank(struct intel_display *display, enum pipe pipe) 120 { 121 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 122 123 drm_crtc_handle_vblank(&crtc->base); 124 } 125 126 /** 127 * ilk_update_display_irq - update DEIMR 128 * @display: display device 129 * @interrupt_mask: mask of interrupt bits to update 130 * @enabled_irq_mask: mask of interrupt bits to enable 131 */ 132 void ilk_update_display_irq(struct intel_display *display, 133 u32 interrupt_mask, u32 enabled_irq_mask) 134 { 135 struct drm_i915_private 
*dev_priv = to_i915(display->drm); 136 u32 new_val; 137 138 lockdep_assert_held(&dev_priv->irq_lock); 139 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 140 141 new_val = dev_priv->irq_mask; 142 new_val &= ~interrupt_mask; 143 new_val |= (~enabled_irq_mask & interrupt_mask); 144 145 if (new_val != dev_priv->irq_mask && 146 !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) { 147 dev_priv->irq_mask = new_val; 148 intel_de_write(display, DEIMR, dev_priv->irq_mask); 149 intel_de_posting_read(display, DEIMR); 150 } 151 } 152 153 void ilk_enable_display_irq(struct intel_display *display, u32 bits) 154 { 155 ilk_update_display_irq(display, bits, bits); 156 } 157 158 void ilk_disable_display_irq(struct intel_display *display, u32 bits) 159 { 160 ilk_update_display_irq(display, bits, 0); 161 } 162 163 /** 164 * bdw_update_port_irq - update DE port interrupt 165 * @display: display device 166 * @interrupt_mask: mask of interrupt bits to update 167 * @enabled_irq_mask: mask of interrupt bits to enable 168 */ 169 void bdw_update_port_irq(struct intel_display *display, 170 u32 interrupt_mask, u32 enabled_irq_mask) 171 { 172 struct drm_i915_private *dev_priv = to_i915(display->drm); 173 u32 new_val; 174 u32 old_val; 175 176 lockdep_assert_held(&dev_priv->irq_lock); 177 178 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 179 180 if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) 181 return; 182 183 old_val = intel_de_read(display, GEN8_DE_PORT_IMR); 184 185 new_val = old_val; 186 new_val &= ~interrupt_mask; 187 new_val |= (~enabled_irq_mask & interrupt_mask); 188 189 if (new_val != old_val) { 190 intel_de_write(display, GEN8_DE_PORT_IMR, new_val); 191 intel_de_posting_read(display, GEN8_DE_PORT_IMR); 192 } 193 } 194 195 /** 196 * bdw_update_pipe_irq - update DE pipe interrupt 197 * @display: display device 198 * @pipe: pipe whose interrupt to update 199 * @interrupt_mask: mask of interrupt bits to update 200 * @enabled_irq_mask: 
mask of interrupt bits to enable 201 */ 202 static void bdw_update_pipe_irq(struct intel_display *display, 203 enum pipe pipe, u32 interrupt_mask, 204 u32 enabled_irq_mask) 205 { 206 struct drm_i915_private *dev_priv = to_i915(display->drm); 207 u32 new_val; 208 209 lockdep_assert_held(&dev_priv->irq_lock); 210 211 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 212 213 if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) 214 return; 215 216 new_val = display->irq.de_irq_mask[pipe]; 217 new_val &= ~interrupt_mask; 218 new_val |= (~enabled_irq_mask & interrupt_mask); 219 220 if (new_val != display->irq.de_irq_mask[pipe]) { 221 display->irq.de_irq_mask[pipe] = new_val; 222 intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]); 223 intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe)); 224 } 225 } 226 227 void bdw_enable_pipe_irq(struct intel_display *display, 228 enum pipe pipe, u32 bits) 229 { 230 bdw_update_pipe_irq(display, pipe, bits, bits); 231 } 232 233 void bdw_disable_pipe_irq(struct intel_display *display, 234 enum pipe pipe, u32 bits) 235 { 236 bdw_update_pipe_irq(display, pipe, bits, 0); 237 } 238 239 /** 240 * ibx_display_interrupt_update - update SDEIMR 241 * @display: display device 242 * @interrupt_mask: mask of interrupt bits to update 243 * @enabled_irq_mask: mask of interrupt bits to enable 244 */ 245 void ibx_display_interrupt_update(struct intel_display *display, 246 u32 interrupt_mask, 247 u32 enabled_irq_mask) 248 { 249 struct drm_i915_private *dev_priv = to_i915(display->drm); 250 u32 sdeimr = intel_de_read(display, SDEIMR); 251 252 sdeimr &= ~interrupt_mask; 253 sdeimr |= (~enabled_irq_mask & interrupt_mask); 254 255 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 256 257 lockdep_assert_held(&dev_priv->irq_lock); 258 259 if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) 260 return; 261 262 intel_de_write(display, SDEIMR, sdeimr); 263 intel_de_posting_read(display, 
SDEIMR); 264 } 265 266 void ibx_enable_display_interrupt(struct intel_display *display, u32 bits) 267 { 268 ibx_display_interrupt_update(display, bits, bits); 269 } 270 271 void ibx_disable_display_interrupt(struct intel_display *display, u32 bits) 272 { 273 ibx_display_interrupt_update(display, bits, 0); 274 } 275 276 u32 i915_pipestat_enable_mask(struct intel_display *display, 277 enum pipe pipe) 278 { 279 struct drm_i915_private *dev_priv = to_i915(display->drm); 280 u32 status_mask = display->irq.pipestat_irq_mask[pipe]; 281 u32 enable_mask = status_mask << 16; 282 283 lockdep_assert_held(&dev_priv->irq_lock); 284 285 if (DISPLAY_VER(display) < 5) 286 goto out; 287 288 /* 289 * On pipe A we don't support the PSR interrupt yet, 290 * on pipe B and C the same bit MBZ. 291 */ 292 if (drm_WARN_ON_ONCE(display->drm, 293 status_mask & PIPE_A_PSR_STATUS_VLV)) 294 return 0; 295 /* 296 * On pipe B and C we don't support the PSR interrupt yet, on pipe 297 * A the same bit is for perf counters which we don't use either. 
298 */ 299 if (drm_WARN_ON_ONCE(display->drm, 300 status_mask & PIPE_B_PSR_STATUS_VLV)) 301 return 0; 302 303 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 304 SPRITE0_FLIP_DONE_INT_EN_VLV | 305 SPRITE1_FLIP_DONE_INT_EN_VLV); 306 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 307 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 308 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 309 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 310 311 out: 312 drm_WARN_ONCE(display->drm, 313 enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 314 status_mask & ~PIPESTAT_INT_STATUS_MASK, 315 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 316 pipe_name(pipe), enable_mask, status_mask); 317 318 return enable_mask; 319 } 320 321 void i915_enable_pipestat(struct intel_display *display, 322 enum pipe pipe, u32 status_mask) 323 { 324 struct drm_i915_private *dev_priv = to_i915(display->drm); 325 i915_reg_t reg = PIPESTAT(display, pipe); 326 u32 enable_mask; 327 328 drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 329 "pipe %c: status_mask=0x%x\n", 330 pipe_name(pipe), status_mask); 331 332 lockdep_assert_held(&dev_priv->irq_lock); 333 drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)); 334 335 if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask) 336 return; 337 338 display->irq.pipestat_irq_mask[pipe] |= status_mask; 339 enable_mask = i915_pipestat_enable_mask(display, pipe); 340 341 intel_de_write(display, reg, enable_mask | status_mask); 342 intel_de_posting_read(display, reg); 343 } 344 345 void i915_disable_pipestat(struct intel_display *display, 346 enum pipe pipe, u32 status_mask) 347 { 348 struct drm_i915_private *dev_priv = to_i915(display->drm); 349 i915_reg_t reg = PIPESTAT(display, pipe); 350 u32 enable_mask; 351 352 drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 353 "pipe %c: status_mask=0x%x\n", 354 pipe_name(pipe), status_mask); 355 356 lockdep_assert_held(&dev_priv->irq_lock); 357 drm_WARN_ON(display->drm, 
!intel_irqs_enabled(dev_priv)); 358 359 if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0) 360 return; 361 362 display->irq.pipestat_irq_mask[pipe] &= ~status_mask; 363 enable_mask = i915_pipestat_enable_mask(display, pipe); 364 365 intel_de_write(display, reg, enable_mask | status_mask); 366 intel_de_posting_read(display, reg); 367 } 368 369 static bool i915_has_legacy_blc_interrupt(struct intel_display *display) 370 { 371 if (display->platform.i85x) 372 return true; 373 374 if (display->platform.pineview) 375 return true; 376 377 return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile; 378 } 379 380 /** 381 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 382 * @display: display device 383 */ 384 void i915_enable_asle_pipestat(struct intel_display *display) 385 { 386 struct drm_i915_private *dev_priv = to_i915(display->drm); 387 388 if (!intel_opregion_asle_present(display)) 389 return; 390 391 if (!i915_has_legacy_blc_interrupt(display)) 392 return; 393 394 spin_lock_irq(&dev_priv->irq_lock); 395 396 i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 397 if (DISPLAY_VER(display) >= 4) 398 i915_enable_pipestat(display, PIPE_A, 399 PIPE_LEGACY_BLC_EVENT_STATUS); 400 401 spin_unlock_irq(&dev_priv->irq_lock); 402 } 403 404 #if IS_ENABLED(CONFIG_DEBUG_FS) 405 static void display_pipe_crc_irq_handler(struct intel_display *display, 406 enum pipe pipe, 407 u32 crc0, u32 crc1, 408 u32 crc2, u32 crc3, 409 u32 crc4) 410 { 411 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 412 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; 413 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; 414 415 trace_intel_pipe_crc(crtc, crcs); 416 417 spin_lock(&pipe_crc->lock); 418 /* 419 * For some not yet identified reason, the first CRC is 420 * bonkers. So let's just wait for the next vblank and read 421 * out the buggy result. 
422 * 423 * On GEN8+ sometimes the second CRC is bonkers as well, so 424 * don't trust that one either. 425 */ 426 if (pipe_crc->skipped <= 0 || 427 (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) { 428 pipe_crc->skipped++; 429 spin_unlock(&pipe_crc->lock); 430 return; 431 } 432 spin_unlock(&pipe_crc->lock); 433 434 drm_crtc_add_crc_entry(&crtc->base, true, 435 drm_crtc_accurate_vblank_count(&crtc->base), 436 crcs); 437 } 438 #else 439 static inline void 440 display_pipe_crc_irq_handler(struct intel_display *display, 441 enum pipe pipe, 442 u32 crc0, u32 crc1, 443 u32 crc2, u32 crc3, 444 u32 crc4) {} 445 #endif 446 447 static void flip_done_handler(struct intel_display *display, 448 enum pipe pipe) 449 { 450 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 451 452 spin_lock(&display->drm->event_lock); 453 454 if (crtc->flip_done_event) { 455 trace_intel_crtc_flip_done(crtc); 456 drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event); 457 crtc->flip_done_event = NULL; 458 } 459 460 spin_unlock(&display->drm->event_lock); 461 } 462 463 static void hsw_pipe_crc_irq_handler(struct intel_display *display, 464 enum pipe pipe) 465 { 466 display_pipe_crc_irq_handler(display, pipe, 467 intel_de_read(display, PIPE_CRC_RES_HSW(pipe)), 468 0, 0, 0, 0); 469 } 470 471 static void ivb_pipe_crc_irq_handler(struct intel_display *display, 472 enum pipe pipe) 473 { 474 display_pipe_crc_irq_handler(display, pipe, 475 intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)), 476 intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)), 477 intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)), 478 intel_de_read(display, PIPE_CRC_RES_4_IVB(pipe)), 479 intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe))); 480 } 481 482 static void i9xx_pipe_crc_irq_handler(struct intel_display *display, 483 enum pipe pipe) 484 { 485 u32 res1, res2; 486 487 if (DISPLAY_VER(display) >= 3) 488 res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe)); 489 else 490 res1 = 0; 491 492 if 
(DISPLAY_VER(display) >= 5 || display->platform.g4x) 493 res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe)); 494 else 495 res2 = 0; 496 497 display_pipe_crc_irq_handler(display, pipe, 498 intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)), 499 intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)), 500 intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)), 501 res1, res2); 502 } 503 504 static void i9xx_pipestat_irq_reset(struct intel_display *display) 505 { 506 enum pipe pipe; 507 508 for_each_pipe(display, pipe) { 509 intel_de_write(display, 510 PIPESTAT(display, pipe), 511 PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS); 512 513 display->irq.pipestat_irq_mask[pipe] = 0; 514 } 515 } 516 517 void i9xx_pipestat_irq_ack(struct intel_display *display, 518 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 519 { 520 struct drm_i915_private *dev_priv = to_i915(display->drm); 521 enum pipe pipe; 522 523 spin_lock(&dev_priv->irq_lock); 524 525 if ((display->platform.valleyview || display->platform.cherryview) && 526 !display->irq.vlv_display_irqs_enabled) { 527 spin_unlock(&dev_priv->irq_lock); 528 return; 529 } 530 531 for_each_pipe(display, pipe) { 532 i915_reg_t reg; 533 u32 status_mask, enable_mask, iir_bit = 0; 534 535 /* 536 * PIPESTAT bits get signalled even when the interrupt is 537 * disabled with the mask bits, and some of the status bits do 538 * not generate interrupts at all (like the underrun bit). Hence 539 * we need to be careful that we only handle what we want to 540 * handle. 541 */ 542 543 /* fifo underruns are filterered in the underrun handler. 
*/ 544 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 545 546 switch (pipe) { 547 default: 548 case PIPE_A: 549 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 550 break; 551 case PIPE_B: 552 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 553 break; 554 case PIPE_C: 555 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 556 break; 557 } 558 if (iir & iir_bit) 559 status_mask |= display->irq.pipestat_irq_mask[pipe]; 560 561 if (!status_mask) 562 continue; 563 564 reg = PIPESTAT(display, pipe); 565 pipe_stats[pipe] = intel_de_read(display, reg) & status_mask; 566 enable_mask = i915_pipestat_enable_mask(display, pipe); 567 568 /* 569 * Clear the PIPE*STAT regs before the IIR 570 * 571 * Toggle the enable bits to make sure we get an 572 * edge in the ISR pipe event bit if we don't clear 573 * all the enabled status bits. Otherwise the edge 574 * triggered IIR on i965/g4x wouldn't notice that 575 * an interrupt is still pending. 576 */ 577 if (pipe_stats[pipe]) { 578 intel_de_write(display, reg, pipe_stats[pipe]); 579 intel_de_write(display, reg, enable_mask); 580 } 581 } 582 spin_unlock(&dev_priv->irq_lock); 583 } 584 585 void i915_pipestat_irq_handler(struct intel_display *display, 586 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 587 { 588 bool blc_event = false; 589 enum pipe pipe; 590 591 for_each_pipe(display, pipe) { 592 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 593 intel_handle_vblank(display, pipe); 594 595 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 596 blc_event = true; 597 598 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 599 i9xx_pipe_crc_irq_handler(display, pipe); 600 601 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 602 intel_cpu_fifo_underrun_irq_handler(display, pipe); 603 } 604 605 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 606 intel_opregion_asle_intr(display); 607 } 608 609 void i965_pipestat_irq_handler(struct intel_display *display, 610 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 611 { 612 bool blc_event = false; 613 enum pipe 
pipe; 614 615 for_each_pipe(display, pipe) { 616 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 617 intel_handle_vblank(display, pipe); 618 619 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 620 blc_event = true; 621 622 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 623 i9xx_pipe_crc_irq_handler(display, pipe); 624 625 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 626 intel_cpu_fifo_underrun_irq_handler(display, pipe); 627 } 628 629 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 630 intel_opregion_asle_intr(display); 631 632 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 633 intel_gmbus_irq_handler(display); 634 } 635 636 void valleyview_pipestat_irq_handler(struct intel_display *display, 637 u32 pipe_stats[I915_MAX_PIPES]) 638 { 639 enum pipe pipe; 640 641 for_each_pipe(display, pipe) { 642 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 643 intel_handle_vblank(display, pipe); 644 645 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 646 flip_done_handler(display, pipe); 647 648 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 649 i9xx_pipe_crc_irq_handler(display, pipe); 650 651 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 652 intel_cpu_fifo_underrun_irq_handler(display, pipe); 653 } 654 655 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 656 intel_gmbus_irq_handler(display); 657 } 658 659 static void ibx_irq_handler(struct intel_display *display, u32 pch_iir) 660 { 661 enum pipe pipe; 662 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 663 664 ibx_hpd_irq_handler(display, hotplug_trigger); 665 666 if (pch_iir & SDE_AUDIO_POWER_MASK) { 667 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 668 SDE_AUDIO_POWER_SHIFT); 669 drm_dbg(display->drm, "PCH audio power change on port %d\n", 670 port_name(port)); 671 } 672 673 if (pch_iir & SDE_AUX_MASK) 674 intel_dp_aux_irq_handler(display); 675 676 if (pch_iir & SDE_GMBUS) 677 intel_gmbus_irq_handler(display); 678 679 if (pch_iir & SDE_AUDIO_HDCP_MASK) 680 
drm_dbg(display->drm, "PCH HDCP audio interrupt\n"); 681 682 if (pch_iir & SDE_AUDIO_TRANS_MASK) 683 drm_dbg(display->drm, "PCH transcoder audio interrupt\n"); 684 685 if (pch_iir & SDE_POISON) 686 drm_err(display->drm, "PCH poison interrupt\n"); 687 688 if (pch_iir & SDE_FDI_MASK) { 689 for_each_pipe(display, pipe) 690 drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n", 691 pipe_name(pipe), 692 intel_de_read(display, FDI_RX_IIR(pipe))); 693 } 694 695 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 696 drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n"); 697 698 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 699 drm_dbg(display->drm, 700 "PCH transcoder CRC error interrupt\n"); 701 702 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 703 intel_pch_fifo_underrun_irq_handler(display, PIPE_A); 704 705 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 706 intel_pch_fifo_underrun_irq_handler(display, PIPE_B); 707 } 708 709 static u32 ivb_err_int_pipe_fault_mask(enum pipe pipe) 710 { 711 switch (pipe) { 712 case PIPE_A: 713 return ERR_INT_SPRITE_A_FAULT | 714 ERR_INT_PRIMARY_A_FAULT | 715 ERR_INT_CURSOR_A_FAULT; 716 case PIPE_B: 717 return ERR_INT_SPRITE_B_FAULT | 718 ERR_INT_PRIMARY_B_FAULT | 719 ERR_INT_CURSOR_B_FAULT; 720 case PIPE_C: 721 return ERR_INT_SPRITE_C_FAULT | 722 ERR_INT_PRIMARY_C_FAULT | 723 ERR_INT_CURSOR_C_FAULT; 724 default: 725 return 0; 726 } 727 } 728 729 static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = { 730 { .fault = ERR_INT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 731 { .fault = ERR_INT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 732 { .fault = ERR_INT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 733 { .fault = ERR_INT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 734 { .fault = ERR_INT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 735 { .fault = 
ERR_INT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 736 { .fault = ERR_INT_SPRITE_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 737 { .fault = ERR_INT_PRIMARY_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 738 { .fault = ERR_INT_CURSOR_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 739 {} 740 }; 741 742 static void ivb_err_int_handler(struct intel_display *display) 743 { 744 u32 err_int = intel_de_read(display, GEN7_ERR_INT); 745 enum pipe pipe; 746 747 if (err_int & ERR_INT_POISON) 748 drm_err(display->drm, "Poison interrupt\n"); 749 750 if (err_int & ERR_INT_INVALID_GTT_PTE) 751 drm_err_ratelimited(display->drm, "Invalid GTT PTE\n"); 752 753 if (err_int & ERR_INT_INVALID_PTE_DATA) 754 drm_err_ratelimited(display->drm, "Invalid PTE data\n"); 755 756 for_each_pipe(display, pipe) { 757 u32 fault_errors; 758 759 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 760 intel_cpu_fifo_underrun_irq_handler(display, pipe); 761 762 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 763 if (display->platform.ivybridge) 764 ivb_pipe_crc_irq_handler(display, pipe); 765 else 766 hsw_pipe_crc_irq_handler(display, pipe); 767 } 768 769 fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe); 770 if (fault_errors) 771 intel_pipe_fault_irq_handler(display, ivb_pipe_fault_handlers, 772 pipe, fault_errors); 773 } 774 775 intel_de_write(display, GEN7_ERR_INT, err_int); 776 } 777 778 static void cpt_serr_int_handler(struct intel_display *display) 779 { 780 u32 serr_int = intel_de_read(display, SERR_INT); 781 enum pipe pipe; 782 783 if (serr_int & SERR_INT_POISON) 784 drm_err(display->drm, "PCH poison interrupt\n"); 785 786 for_each_pipe(display, pipe) 787 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 788 intel_pch_fifo_underrun_irq_handler(display, pipe); 789 790 intel_de_write(display, SERR_INT, serr_int); 791 } 792 793 static void cpt_irq_handler(struct intel_display *display, u32 pch_iir) 794 { 
795 enum pipe pipe; 796 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 797 798 ibx_hpd_irq_handler(display, hotplug_trigger); 799 800 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 801 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 802 SDE_AUDIO_POWER_SHIFT_CPT); 803 drm_dbg(display->drm, "PCH audio power change on port %c\n", 804 port_name(port)); 805 } 806 807 if (pch_iir & SDE_AUX_MASK_CPT) 808 intel_dp_aux_irq_handler(display); 809 810 if (pch_iir & SDE_GMBUS_CPT) 811 intel_gmbus_irq_handler(display); 812 813 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 814 drm_dbg(display->drm, "Audio CP request interrupt\n"); 815 816 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 817 drm_dbg(display->drm, "Audio CP change interrupt\n"); 818 819 if (pch_iir & SDE_FDI_MASK_CPT) { 820 for_each_pipe(display, pipe) 821 drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n", 822 pipe_name(pipe), 823 intel_de_read(display, FDI_RX_IIR(pipe))); 824 } 825 826 if (pch_iir & SDE_ERROR_CPT) 827 cpt_serr_int_handler(display); 828 } 829 830 static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe) 831 { 832 switch (pipe) { 833 case PIPE_A: 834 return GTT_FAULT_SPRITE_A_FAULT | 835 GTT_FAULT_PRIMARY_A_FAULT | 836 GTT_FAULT_CURSOR_A_FAULT; 837 case PIPE_B: 838 return GTT_FAULT_SPRITE_B_FAULT | 839 GTT_FAULT_PRIMARY_B_FAULT | 840 GTT_FAULT_CURSOR_B_FAULT; 841 default: 842 return 0; 843 } 844 } 845 846 static const struct pipe_fault_handler ilk_pipe_fault_handlers[] = { 847 { .fault = GTT_FAULT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 848 { .fault = GTT_FAULT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 849 { .fault = GTT_FAULT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 850 { .fault = GTT_FAULT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 851 { .fault = GTT_FAULT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 852 { .fault = GTT_FAULT_CURSOR_B_FAULT, 
.handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 853 {} 854 }; 855 856 static void ilk_gtt_fault_irq_handler(struct intel_display *display) 857 { 858 enum pipe pipe; 859 u32 gtt_fault; 860 861 gtt_fault = intel_de_read(display, ILK_GTT_FAULT); 862 intel_de_write(display, ILK_GTT_FAULT, gtt_fault); 863 864 if (gtt_fault & GTT_FAULT_INVALID_GTT_PTE) 865 drm_err_ratelimited(display->drm, "Invalid GTT PTE\n"); 866 867 if (gtt_fault & GTT_FAULT_INVALID_PTE_DATA) 868 drm_err_ratelimited(display->drm, "Invalid PTE data\n"); 869 870 for_each_pipe(display, pipe) { 871 u32 fault_errors; 872 873 fault_errors = gtt_fault & ilk_gtt_fault_pipe_fault_mask(pipe); 874 if (fault_errors) 875 intel_pipe_fault_irq_handler(display, ilk_pipe_fault_handlers, 876 pipe, fault_errors); 877 } 878 } 879 880 void ilk_display_irq_handler(struct intel_display *display, u32 de_iir) 881 { 882 struct drm_i915_private *dev_priv = to_i915(display->drm); 883 enum pipe pipe; 884 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 885 886 if (hotplug_trigger) 887 ilk_hpd_irq_handler(display, hotplug_trigger); 888 889 if (de_iir & DE_AUX_CHANNEL_A) 890 intel_dp_aux_irq_handler(display); 891 892 if (de_iir & DE_GSE) 893 intel_opregion_asle_intr(display); 894 895 if (de_iir & DE_POISON) 896 drm_err(display->drm, "Poison interrupt\n"); 897 898 if (de_iir & DE_GTT_FAULT) 899 ilk_gtt_fault_irq_handler(display); 900 901 for_each_pipe(display, pipe) { 902 if (de_iir & DE_PIPE_VBLANK(pipe)) 903 intel_handle_vblank(display, pipe); 904 905 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 906 flip_done_handler(display, pipe); 907 908 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 909 intel_cpu_fifo_underrun_irq_handler(display, pipe); 910 911 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 912 i9xx_pipe_crc_irq_handler(display, pipe); 913 } 914 915 /* check event from PCH */ 916 if (de_iir & DE_PCH_EVENT) { 917 u32 pch_iir = intel_de_read(display, SDEIIR); 918 919 if (HAS_PCH_CPT(dev_priv)) 920 cpt_irq_handler(display, pch_iir); 921 
else 922 ibx_irq_handler(display, pch_iir); 923 924 /* should clear PCH hotplug event before clear CPU irq */ 925 intel_de_write(display, SDEIIR, pch_iir); 926 } 927 928 if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT) 929 gen5_rps_irq_handler(&to_gt(dev_priv)->rps); 930 } 931 932 void ivb_display_irq_handler(struct intel_display *display, u32 de_iir) 933 { 934 struct drm_i915_private *dev_priv = to_i915(display->drm); 935 enum pipe pipe; 936 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 937 938 if (hotplug_trigger) 939 ilk_hpd_irq_handler(display, hotplug_trigger); 940 941 if (de_iir & DE_ERR_INT_IVB) 942 ivb_err_int_handler(display); 943 944 if (de_iir & DE_EDP_PSR_INT_HSW) { 945 struct intel_encoder *encoder; 946 947 for_each_intel_encoder_with_psr(display->drm, encoder) { 948 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 949 u32 psr_iir; 950 951 psr_iir = intel_de_rmw(display, EDP_PSR_IIR, 0, 0); 952 intel_psr_irq_handler(intel_dp, psr_iir); 953 break; 954 } 955 } 956 957 if (de_iir & DE_AUX_CHANNEL_A_IVB) 958 intel_dp_aux_irq_handler(display); 959 960 if (de_iir & DE_GSE_IVB) 961 intel_opregion_asle_intr(display); 962 963 for_each_pipe(display, pipe) { 964 if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) 965 intel_handle_vblank(display, pipe); 966 967 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 968 flip_done_handler(display, pipe); 969 } 970 971 /* check event from PCH */ 972 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 973 u32 pch_iir = intel_de_read(display, SDEIIR); 974 975 cpt_irq_handler(display, pch_iir); 976 977 /* clear PCH hotplug event before clear CPU irq */ 978 intel_de_write(display, SDEIIR, pch_iir); 979 } 980 } 981 982 static u32 gen8_de_port_aux_mask(struct intel_display *display) 983 { 984 u32 mask; 985 986 if (DISPLAY_VER(display) >= 20) 987 return 0; 988 else if (DISPLAY_VER(display) >= 14) 989 return TGL_DE_PORT_AUX_DDIA | 990 TGL_DE_PORT_AUX_DDIB; 991 else if (DISPLAY_VER(display) >= 13) 992 return 
		       TGL_DE_PORT_AUX_DDIA |
		       TGL_DE_PORT_AUX_DDIB |
		       TGL_DE_PORT_AUX_DDIC |
		       XELPD_DE_PORT_AUX_DDID |
		       XELPD_DE_PORT_AUX_DDIE |
		       TGL_DE_PORT_AUX_USBC1 |
		       TGL_DE_PORT_AUX_USBC2 |
		       TGL_DE_PORT_AUX_USBC3 |
		       TGL_DE_PORT_AUX_USBC4;
	else if (DISPLAY_VER(display) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
		       TGL_DE_PORT_AUX_DDIB |
		       TGL_DE_PORT_AUX_DDIC |
		       TGL_DE_PORT_AUX_USBC1 |
		       TGL_DE_PORT_AUX_USBC2 |
		       TGL_DE_PORT_AUX_USBC3 |
		       TGL_DE_PORT_AUX_USBC4 |
		       TGL_DE_PORT_AUX_USBC5 |
		       TGL_DE_PORT_AUX_USBC6;

	/* Pre-display 12: build the AUX channel mask up incrementally. */
	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(display) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(display) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}

/*
 * Mask of fault bits that can show up in GEN8_DE_PIPE_IIR on this
 * platform: per-plane/cursor faults, plus PIPEDMC and ATS faults
 * where the hardware has them.
 */
static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 14)
		return MTL_PIPEDMC_ATS_FAULT |
			MTL_PLANE_ATS_FAULT |
			GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 12)
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 11)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else
		return GEN8_PIPE_CURSOR_FAULT |
			GEN8_PIPE_SPRITE_FAULT |
			GEN8_PIPE_PRIMARY_FAULT;
}

/* Log a plane ATS fault; returning true marks the fault bit as handled. */
static bool handle_plane_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PLANE ATS fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

/* Log a PIPEDMC ATS fault; returning true marks the fault bit as handled. */
static bool handle_pipedmc_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PIPEDMC ATS fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

/* Log a PIPEDMC fault; returning true marks the fault bit as handled. */
static bool handle_pipedmc_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PIPEDMC fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

/*
 * Per-platform tables mapping each pipe fault IIR bit to its handler
 * (and, for plane faults, the plane it belongs to). Terminated by an
 * empty sentinel entry.
 */
static const struct pipe_fault_handler mtl_pipe_fault_handlers[] = {
	{ .fault = MTL_PLANE_ATS_FAULT, .handle = handle_plane_ats_fault, },
	{ .fault = MTL_PIPEDMC_ATS_FAULT, .handle = handle_pipedmc_ats_fault, },
	{ .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler tgl_pipe_fault_handlers[] = {
	{ .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler icl_pipe_fault_handlers[] = {
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler skl_pipe_fault_handlers[] = {
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler bdw_pipe_fault_handlers[] = {
	{ .fault = GEN8_PIPE_SPRITE_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GEN8_PIPE_PRIMARY_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GEN8_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

/* Pick the pipe fault handler table matching this display generation. */
static const struct pipe_fault_handler *
gen8_pipe_fault_handlers(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 14)
		return mtl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 12)
		return tgl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 11)
		return icl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 9)
		return skl_pipe_fault_handlers;
	else
		return bdw_pipe_fault_handlers;
}

/* Wake up anyone waiting on a pmdemand response. */
static void intel_pmdemand_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->pmdemand.waitqueue);
}

/*
 * Handle the DE misc interrupt bits: DBUF overlap, pmdemand responses,
 * register access timeouts, GSE/ASLE and PSR. Complains if no known
 * bit was found in the IIR value.
 */
static void
gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
	bool found = false;

	if (HAS_DBUF_OVERLAP_DETECTION(display)) {
		if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) {
			drm_warn(display->drm, "DBuf overlap detected\n");
			found = true;
		}
	}

	if (DISPLAY_VER(display) >= 14) {
		if (iir & (XELPDP_PMDEMAND_RSP |
			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
				drm_dbg(display->drm,
					"Error waiting for Punit PM Demand Response\n");

			/* Wake pmdemand waiters on both response and timeout. */
			intel_pmdemand_irq_handler(display);
			found = true;
		}

		if (iir & XELPDP_RM_TIMEOUT) {
			u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);

			drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
			found = true;
		}
	} else if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(display);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(display->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			/* display 12+ has per-transcoder PSR IIR registers */
			if (DISPLAY_VER(display) >= 12)
				iir_reg = TRANS_PSR_IIR(display,
							intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			psr_iir = intel_de_rmw(display, iir_reg, 0, 0);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior to GEN12 there is only one EDP PSR instance */
			if (DISPLAY_VER(display) < 12)
				break;
		}
	}

	if (!found)
		drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}

/*
 * Handle a DSI TE (tear effect) interrupt: map the TE trigger to the
 * DSI transcoder/pipe, report the vblank and ack the TE in the DSI IIR.
 */
static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val;

	/*
	 * In case of dual link, TE comes from DSI_1;
	 * this is to check if dual link is enabled
	 */
	val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * if dual link is enabled, then read DSI_0
	 * transcoder registers
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
		PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(display->drm, "DSI trancoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(display->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(display, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}

/* The flip done bit moved when plane 1 replaced the primary plane (display 9+). */
static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

/*
 * Read and ack SDEIIR, and - when the PICA interrupt bit is set - also
 * read and ack PICAINTERRUPT_IIR. Returns both raw IIR values through
 * the out parameters (zero when nothing was pending).
 */
static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(display, SDEIIR);
	if (!*pch_iir)
		return;

	/*
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		drm_WARN_ON(display->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);

		pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
		intel_de_write(display, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(display, SDEIIR, *pch_iir);

	if (pica_ier)
		intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}

/*
 * Dispatch all display engine interrupts indicated by master_ctl:
 * DE misc, HPD, DE port (AUX/hotplug/GMBUS/DSI TE), per-pipe events
 * (vblank, flip done, DSB, CRC, underruns, faults) and PCH/SDE irqs.
 * Each IIR is acked (written back) before its bits are handled.
 */
void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_de_read(display, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_de_write(display, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(display, iir);
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_de_read(display, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_de_write(display, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(display, iir);
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_de_read(display, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_de_write(display, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(display)) {
				intel_dp_aux_irq_handler(display);
				found = true;
			}

			if (display->platform.geminilake || display->platform.broxton) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(display, hotplug_trigger);
					found = true;
				}
			} else if (display->platform.broadwell) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(display, hotplug_trigger);
					found = true;
				}
			}

			if ((display->platform.geminilake || display->platform.broxton) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(display);
				found = true;
			}

			if (DISPLAY_VER(display) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(display, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err_ratelimited(display->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(display, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(display))
			flip_done_handler(display, pipe);

		if (HAS_DSB(display)) {
			if (iir & GEN12_DSB_INT(INTEL_DSB_0))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);

			if (iir & GEN12_DSB_INT(INTEL_DSB_1))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);

			if (iir & GEN12_DSB_INT(INTEL_DSB_2))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
		}

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(display, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(display);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display,
						     gen8_pipe_fault_handlers(display),
						     pipe, fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		u32 pica_iir;

		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
		if (iir) {
			if (pica_iir)
				xelpdp_pica_irq_handler(display, pica_iir);

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(display, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(display, iir);
			else
				cpt_irq_handler(display, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(display->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}
}

/* Read and ack GEN11_GU_MISC_IIR when the master control flagged it. */
u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
{
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = intel_de_read(display, GEN11_GU_MISC_IIR);
	if (likely(iir))
		intel_de_write(display, GEN11_GU_MISC_IIR, iir);

	return iir;
}

/* Handle a previously acked GU misc IIR value (GSE -> opregion ASLE). */
void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(display);
}

/*
 * Top-level display interrupt handler for display 11+: reads the display
 * interrupt control, disables it while dispatching, and re-enables it.
 */
void gen11_display_irq_handler(struct intel_display *display)
{
	u32 disp_ctl;

	intel_display_rpm_assert_block(display);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);

	/* Master display irq off while dispatching, back on afterwards. */
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
	gen8_de_irq_handler(display, disp_ctl);
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	intel_display_rpm_assert_unblock(display);
}

/* Caller must hold vblank_time_lock (asserted below). */
static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
{
	lockdep_assert_held(&display->drm->vblank_time_lock);

	/*
	 * Vblank/CRC interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank/CRC interrupts are actually enabled.
	 */
	if (display->irq.vblank_enabled++ == 0)
		intel_de_write(display, SCPD0,
			       _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

/* Undo i915gm_irq_cstate_wa_enable() when the last vblank/CRC user goes away. */
static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
{
	lockdep_assert_held(&display->drm->vblank_time_lock);

	if (--display->irq.vblank_enabled == 0)
		intel_de_write(display, SCPD0,
			       _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

/* Apply or remove the i915GM C-state workaround under vblank_time_lock. */
void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
{
	spin_lock_irq(&display->drm->vblank_time_lock);

	if (enable)
		i915gm_irq_cstate_wa_enable(display);
	else
		i915gm_irq_cstate_wa_disable(display);

	spin_unlock_irq(&display->drm->vblank_time_lock);
}

/* Enable vblank interrupts for the crtc's pipe via PIPESTAT (gen2-4 style). */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Disable vblank interrupts for the crtc's pipe via PIPESTAT. */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* i915GM: apply the C-state workaround, then enable vblank as on gen2. */
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);

	i915gm_irq_cstate_wa_enable(display);

	return i8xx_enable_vblank(crtc);
}

/* i915GM: disable vblank, then drop the C-state workaround. */
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);

	i8xx_disable_vblank(crtc);

	i915gm_irq_cstate_wa_disable(display);
}

/* gen4: uses the start-of-vblank PIPESTAT bit rather than the frame one. */
int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(display, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(display, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* ILK+: enable the per-pipe vblank bit in the DE IMR/IER. */
int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(display) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(display, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(display))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(display) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(display, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/*
 * Mask/unmask the DSI TE event for crtcs using TE-based (command mode)
 * vblanks. Returns false when the crtc doesn't use DSI TE, so the
 * caller falls back to regular pipe vblank interrupts.
 */
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct intel_display *display = to_intel_display(intel_crtc);
	enum port port;

	if (!(intel_crtc->mode_flags &
	      (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

	intel_de_rmw(display, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
		     enable ? 0 : DSI_TE_EVENT);

	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);

	return true;
}

/*
 * Deferred worker toggling the target DC state depending on whether any
 * pipe currently needs DC states blocked for working vblanks.
 */
static void intel_display_vblank_dc_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), irq.vblank_dc_work);
	int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);

	/*
	 * NOTE: intel_display_power_set_target_dc_state is used only by PSR
	 * code for DC3CO handling. DC3CO target state is currently disabled in
	 * PSR code. If DC3CO is taken into use we need take that into account
	 * here as well.
	 */
	intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
						DC_STATE_EN_UPTO_DC6);
}

/* BDW+: enable vblank via the per-pipe DE interrupt (or DSI TE). */
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_display *display = to_intel_display(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, true))
		return 0;

	/* First pipe needing the workaround: kick the DC-state work. */
	if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
		schedule_work(&display->irq.vblank_dc_work);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(display))
		drm_crtc_vblank_restore(&crtc->base);

	return 0;
}

/* BDW+: disable vblank via the per-pipe DE interrupt (or DSI TE). */
void bdw_disable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_display *display = to_intel_display(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, false))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Last pipe needing the workaround gone: let DC states back in. */
	if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
		schedule_work(&display->irq.vblank_dc_work);
}

/* DPINVGTT fault status bits belonging to the given pipe. */
static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return SPRITEB_INVALID_GTT_STATUS |
			SPRITEA_INVALID_GTT_STATUS |
			PLANEA_INVALID_GTT_STATUS |
			CURSORA_INVALID_GTT_STATUS;
	case PIPE_B:
		return SPRITED_INVALID_GTT_STATUS |
			SPRITEC_INVALID_GTT_STATUS |
			PLANEB_INVALID_GTT_STATUS |
			CURSORB_INVALID_GTT_STATUS;
	case PIPE_C:
		return SPRITEF_INVALID_GTT_STATUS |
			SPRITEE_INVALID_GTT_STATUS |
			PLANEC_INVALID_GTT_STATUS |
			CURSORC_INVALID_GTT_STATUS;
	default:
		return 0;
	}
}

/* Map each DPINVGTT fault status bit to the plane it belongs to. */
static const struct pipe_fault_handler vlv_pipe_fault_handlers[] = {
	{ .fault = SPRITEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = SPRITED_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = SPRITEF_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEE_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

/*
 * Ack the DPINVGTT page table faults: report the currently enabled
 * status bits through *dpinvgtt and permanently disable them.
 */
static void vlv_page_table_error_irq_ack(struct intel_display *display, u32 *dpinvgtt)
{
	u32 status, enable, tmp;

	tmp = intel_de_read(display, DPINVGTT);

	/* upper 16 bits are the enables, lower 16 bits the status */
	enable = tmp >> 16;
	status = tmp & 0xffff;

	/*
	 * Despite what the docs claim, the status bits seem to get
	 * stuck permanently (similar the old PGTBL_ER register), so
	 * we have to disable and ignore them once set. They do get
	 * reset if the display power well goes down, so no need to
	 * track the enable mask explicitly.
	 */
	*dpinvgtt = status & enable;
	enable &= ~status;

	/* customary ack+disable then re-enable to guarantee an edge */
	intel_de_write(display, DPINVGTT, status);
	intel_de_write(display, DPINVGTT, enable << 16);
}

/* Dispatch previously acked DPINVGTT faults to the per-pipe fault handler. */
static void vlv_page_table_error_irq_handler(struct intel_display *display, u32 dpinvgtt)
{
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		fault_errors = dpinvgtt & vlv_dpinvgtt_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, vlv_pipe_fault_handlers,
						     pipe, fault_errors);
	}
}

/* Read and ack VLV_EIR (and DPINVGTT for page table errors). */
void vlv_display_error_irq_ack(struct intel_display *display,
			       u32 *eir, u32 *dpinvgtt)
{
	u32 emr;

	*eir = intel_de_read(display, VLV_EIR);

	if (*eir & VLV_ERROR_PAGE_TABLE)
		vlv_page_table_error_irq_ack(display, dpinvgtt);

	intel_de_write(display, VLV_EIR, *eir);

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits.
	 */
	emr = intel_de_read(display, VLV_EMR);
	intel_de_write(display, VLV_EMR, 0xffffffff);
	intel_de_write(display, VLV_EMR, emr);
}

/* Handle a previously acked master error (EIR/DPINVGTT) on VLV/CHV. */
void vlv_display_error_irq_handler(struct intel_display *display,
				   u32 eir, u32 dpinvgtt)
{
	drm_dbg(display->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir & VLV_ERROR_PAGE_TABLE)
		vlv_page_table_error_irq_handler(display, dpinvgtt);
}

/* Reset all VLV/CHV display interrupt state to its quiescent defaults. */
static void _vlv_display_irq_reset(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	if (display->platform.cherryview)
		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	gen2_error_reset(to_intel_uncore(display->drm),
			 VLV_ERROR_REGS);

	i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
	intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);

	i9xx_pipestat_irq_reset(display);

	intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

void vlv_display_irq_reset(struct intel_display *display)
{
	if (display->irq.vlv_display_irqs_enabled)
		_vlv_display_irq_reset(display);
}

/* Reset gen2-4 display interrupt state (hotplug + pipestat). */
void i9xx_display_irq_reset(struct intel_display *display)
{
	if (HAS_HOTPLUG(display)) {
		i915_hotplug_interrupt_update(display, 0xffffffff, 0);
		intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
	}

	i9xx_pipestat_irq_reset(display);
}

/* Error sources we unmask in VLV_EMR. */
static u32 vlv_error_mask(void)
{
	/* TODO enable other errors too?
	 */
	return VLV_ERROR_PAGE_TABLE;
}

/* Program and enable the VLV/CHV display interrupt sources. */
void vlv_display_irq_postinstall(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	if (!display->irq.vlv_display_irqs_enabled)
		return;

	if (display->platform.cherryview)
		intel_de_write(display, DPINVGTT,
			       DPINVGTT_STATUS_MASK_CHV |
			       DPINVGTT_EN_MASK_CHV);
	else
		intel_de_write(display, DPINVGTT,
			       DPINVGTT_STATUS_MASK_VLV |
			       DPINVGTT_EN_MASK_VLV);

	gen2_error_init(to_intel_uncore(display->drm),
			VLV_ERROR_REGS, ~vlv_error_mask());

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* GMBUS is reported on pipe A's PIPESTAT */
	i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(display, pipe)
		i915_enable_pipestat(display, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT;

	if (display->platform.cherryview)
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* irq_mask must still be in its reset state (~0u) at this point */
	drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}

/* Reset the gen8 display interrupt registers (PSR, pipes, port, misc). */
void gen8_display_irq_reset(struct intel_display *display)
{
	enum pipe pipe;

	if (!HAS_DISPLAY(display))
		return;

	intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
	intel_de_write(display, EDP_PSR_IIR, 0xffffffff);

	/* only touch pipe registers whose power well is actually up */
	for_each_pipe(display, pipe)
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
}

/* Reset the display 11+ interrupt registers, including HPD/PICA and SDE. */
void gen11_display_irq_reset(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if (!HAS_DISPLAY(display))
		return;

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);

	if (DISPLAY_VER(display) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(display, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(display, domain))
				continue;

			intel_de_write(display,
				       TRANS_PSR_IMR(display, trans),
				       0xffffffff);
			intel_de_write(display,
				       TRANS_PSR_IIR(display, trans),
				       0xffffffff);
		}
	} else {
		intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
		intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(display, pipe)
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);

	if (DISPLAY_VER(display) >= 14)
		intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
	else
		intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}

/* Re-arm pipe interrupts for pipes whose power well just came up. */
void gen8_irq_power_well_post_enable(struct intel_display *display,
				     u8 pipe_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(display);
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(display, pipe, pipe_mask)
		intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
					    display->irq.de_irq_mask[pipe],
					    ~display->irq.de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/* Quiesce pipe interrupts for pipes whose power well is about to go down. */
void gen8_irq_power_well_pre_disable(struct intel_display *display,
				     u8 pipe_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(display, pipe, pipe_mask)
		intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
static void ibx_irq_postinstall(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	/* Pick the per-PCH-generation set of interrupts to unmask. */
	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	/* IER gets everything (see comment above); IMR does the gating. */
	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}

/*
 * Mark VLV/CHV display interrupts as enabled and, if driver interrupts
 * are already installed, reset and reprogram them. Caller must hold
 * irq_lock. Idempotent: a second call is a no-op.
 */
void valleyview_enable_display_irqs(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (display->irq.vlv_display_irqs_enabled)
		return;

	display->irq.vlv_display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		_vlv_display_irq_reset(display);
		vlv_display_irq_postinstall(display);
	}
}

/*
 * Counterpart of valleyview_enable_display_irqs(): mark VLV/CHV display
 * interrupts disabled and reset the hardware if driver interrupts are
 * installed. Caller must hold irq_lock. Idempotent.
 */
void valleyview_disable_display_irqs(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (!display->irq.vlv_display_irqs_enabled)
		return;

	display->irq.vlv_display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		_vlv_display_irq_reset(display);
}

/*
 * Enable ILK-HSW display engine interrupts.
 *
 * display_mask holds the always-unmasked sources, extra_mask the sources
 * that are enabled in IER but left masked in IMR (unmasked on demand,
 * e.g. vblank). IVB+ and pre-IVB use different register bit layouts.
 * Also chains into ibx_irq_postinstall() for the PCH side.
 */
void ilk_de_irq_postinstall(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	u32 display_mask, extra_mask;

	if (DISPLAY_VER(display) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE |
				DE_PCH_EVENT | DE_GTT_FAULT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (display->platform.haswell) {
		/* PSR IIR must be clear before enabling the PSR interrupt. */
		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (display->platform.ironlake && display->platform.mobile)
		extra_mask |= DE_PCU_EVENT;

	i915->irq_mask = ~display_mask;

	ibx_irq_postinstall(display);

	intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
				    display_mask | extra_mask);
}

static void mtp_irq_postinstall(struct intel_display *display);
static void icp_irq_postinstall(struct intel_display *display);

/*
 * Enable gen8+ display engine interrupts.
 *
 * Builds up the *_masked (unmasked in IMR) and *_enables (set in IER)
 * values for the per-pipe, DE port, DE misc and (display 11-13) HPD
 * registers, with display-version and platform conditionals, then
 * programs them. Pipes behind a powered-down power well are skipped;
 * their registers are handled by gen8_irq_power_well_post_enable()
 * when the well comes up. Also chains into the appropriate PCH/PICA
 * postinstall helper.
 */
void gen8_de_irq_postinstall(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(display);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(display))
		return;

	/* South/PICA side first. */
	if (DISPLAY_VER(display) >= 14)
		mtp_irq_postinstall(display);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(display);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(display);

	if (DISPLAY_VER(display) < 11)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (display->platform.geminilake || display->platform.broxton)
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(display) >= 14) {
		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
			XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
	} else if (DISPLAY_VER(display) >= 11) {
		enum port port;

		/* DSI TE interrupts only if the VBT says DSI is present. */
		if (intel_bios_is_dsi_present(display, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	if (HAS_DBUF_OVERLAP_DETECTION(display))
		de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;

	if (HAS_DSB(display))
		de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
			GEN12_DSB_INT(INTEL_DSB_1) |
			GEN12_DSB_INT(INTEL_DSB_2);

	/*
	 * vblank/underrun/flip-done are enabled in IER but stay masked
	 * in IMR until someone asks for them.
	 */
	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(display);

	de_port_enables = de_port_masked;
	if (display->platform.geminilake || display->platform.broxton)
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (display->platform.broadwell)
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	/* PSR IIR must be clear before enabling the PSR interrupt. */
	if (DISPLAY_VER(display) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(display, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(display, domain))
				continue;

			intel_display_irq_regs_assert_irr_is_zero(display,
								  TRANS_PSR_IIR(display, trans));
		}
	} else {
		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
	}

	for_each_pipe(display, pipe) {
		display->irq.de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
						    display->irq.de_irq_mask[pipe],
						    de_pipe_enables);
	}

	intel_display_irq_regs_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked,
				    de_port_enables);
	intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
				    de_misc_masked);

	if (IS_DISPLAY_VER(display, 11, 13)) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
			GEN11_DE_TBT_HOTPLUG_MASK;

		intel_display_irq_regs_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
					    de_hpd_enables);
	}
}

/*
 * Display 14+ (MTP) south interrupt setup: PICA hotplug/AUX interrupts
 * plus the south display (GMBUS, PICA passthrough) interrupts.
 */
static void mtp_irq_postinstall(struct intel_display *display)
{
	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
		XELPDP_TBT_HOTPLUG_MASK;

	intel_display_irq_regs_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
				    de_hpd_enables);

	/* IER gets everything (see SDEIER comment above ibx_irq_postinstall). */
	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}

/* ICP+ PCH south interrupt setup: only GMBUS unmasked here. */
static void icp_irq_postinstall(struct intel_display *display)
{
	u32 mask = SDE_GMBUS_ICP;

	/* IER gets everything (see SDEIER comment above ibx_irq_postinstall). */
	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}

/*
 * Gen11+ display interrupt postinstall: program the DE registers, then
 * enable the top-level display interrupt.
 */
void gen11_de_irq_postinstall(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	gen8_de_irq_postinstall(display);

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}

/* DG1 variant: same sequence as gen11_de_irq_postinstall(). */
void dg1_de_irq_postinstall(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	gen8_de_irq_postinstall(display);
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}

/*
 * One-time display interrupt init: opt in to immediate vblank disable,
 * set up hotplug interrupt state and the vblank DC-state work item.
 */
void intel_display_irq_init(struct intel_display *display)
{
	display->drm->vblank_disable_immediate = true;

	intel_hotplug_irq_init(display);

	INIT_WORK(&display->irq.vblank_dc_work, intel_display_vblank_dc_work);
}