// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_vblank.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_pipe_crc_regs.h"
#include "intel_plane.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_uncore.h"

static void
intel_display_irq_regs_init(struct intel_display *display, struct i915_irq_regs regs,
			    u32 imr_val, u32 ier_val)
{
	intel_dmc_wl_get(display, regs.imr);
	intel_dmc_wl_get(display, regs.ier);
	intel_dmc_wl_get(display, regs.iir);

	gen2_irq_init(to_intel_uncore(display->drm), regs, imr_val, ier_val);

	intel_dmc_wl_put(display, regs.iir);
	intel_dmc_wl_put(display, regs.ier);
	intel_dmc_wl_put(display, regs.imr);
}

static void
intel_display_irq_regs_reset(struct intel_display *display, struct i915_irq_regs regs)
{
	intel_dmc_wl_get(display, regs.imr);
	intel_dmc_wl_get(display, regs.ier);
	intel_dmc_wl_get(display, regs.iir);

	gen2_irq_reset(to_intel_uncore(display->drm), regs);

	intel_dmc_wl_put(display, regs.iir);
	intel_dmc_wl_put(display, regs.ier);
	intel_dmc_wl_put(display, regs.imr);
}

static void
intel_display_irq_regs_assert_irr_is_zero(struct intel_display *display, i915_reg_t reg)
{
	intel_dmc_wl_get(display, reg);

	gen2_assert_iir_is_zero(to_intel_uncore(display->drm), reg);

	intel_dmc_wl_put(display, reg);
}

struct pipe_fault_handler {
	bool (*handle)(struct intel_crtc *crtc, enum plane_id plane_id);
	u32 fault;
	enum plane_id plane_id;
};

static bool handle_plane_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_plane_error error = {};
	struct intel_plane *plane;

	plane = intel_crtc_get_plane(crtc, plane_id);
	if (!plane || !plane->capture_error)
		return false;

	plane->capture_error(crtc, plane, &error);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s][PLANE:%d:%s] fault (CTL=0x%x, SURF=0x%x, SURFLIVE=0x%x)\n",
			    crtc->base.base.id, crtc->base.name,
			    plane->base.base.id, plane->base.name,
			    error.ctl, error.surf, error.surflive);

	return true;
}

static void intel_pipe_fault_irq_handler(struct intel_display *display,
					 const struct pipe_fault_handler *handlers,
					 enum pipe pipe, u32 fault_errors)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	const struct pipe_fault_handler *handler;

	for (handler = handlers; handler && handler->fault; handler++) {
		if ((fault_errors & handler->fault) == 0)
			continue;

		if (handler->handle(crtc, handler->plane_id))
			fault_errors &= ~handler->fault;
	}

	drm_WARN_ONCE(display->drm, fault_errors, "[CRTC:%d:%s] unreported faults 0x%x\n",
		      crtc->base.base.id, crtc->base.name, fault_errors);
}

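/* Forward a pipe's vblank interrupt to the drm vblank machinery. */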
static void
intel_handle_vblank(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @display: display device
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct intel_display *display,
			    u32 interrupt_mask, u32 enabled_irq_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 new_val;

	lockdep_assert_held(&display->irq.lock);
	drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_de_write(display, DEIMR, dev_priv->irq_mask);
		intel_de_posting_read(display, DEIMR);
	}
}

void ilk_enable_display_irq(struct intel_display *display, u32 bits)
{
	ilk_update_display_irq(display, bits, bits);
}

void ilk_disable_display_irq(struct intel_display *display, u32 bits)
{
	ilk_update_display_irq(display, bits, 0);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @display: display device
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_port_irq(struct intel_display *display,
			 u32 interrupt_mask, u32 enabled_irq_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&display->irq.lock);

	drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_de_read(display, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_de_write(display, GEN8_DE_PORT_IMR, new_val);
		intel_de_posting_read(display, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @display: display device
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct intel_display *display,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 new_val;

	lockdep_assert_held(&display->irq.lock);

	drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = display->irq.de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != display->irq.de_irq_mask[pipe]) {
		display->irq.de_irq_mask[pipe] = new_val;
		intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
		intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct intel_display *display,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(display, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct intel_display *display,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(display, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @display: display device
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct intel_display *display,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 sdeimr = intel_de_read(display, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&display->irq.lock);

	if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_de_write(display, SDEIMR, sdeimr);
	intel_de_posting_read(display, SDEIMR);
}

void ibx_enable_display_interrupt(struct intel_display *display, u32 bits)
{
	ibx_display_interrupt_update(display, bits, bits);
}

void ibx_disable_display_interrupt(struct intel_display *display, u32 bits)
{
	ibx_display_interrupt_update(display, bits, 0);
}

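/**
 * i915_pipestat_enable_mask - compute PIPESTAT interrupt enable bits
 * @display: display device
 * @pipe: pipe whose enable mask to compute
 *
 * Derive the PIPESTAT interrupt enable bits (the high half of the
 * register) from the currently enabled status bits. Must be called
 * with display->irq.lock held.
 */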
u32 i915_pipestat_enable_mask(struct intel_display *display,
			      enum pipe pipe)
{
	u32 status_mask = display->irq.pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&display->irq.lock);

	if (DISPLAY_VER(display) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(display->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(display->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(display->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

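/*
 * Enable the selected PIPESTAT status bits and their interrupt enables.
 * Callers hold display->irq.lock, e.g. (mirroring
 * i915_display_irq_postinstall() below):
 *
 *	spin_lock_irq(&display->irq.lock);
 *	i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
 *	spin_unlock_irq(&display->irq.lock);
 */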
void i915_enable_pipestat(struct intel_display *display,
			  enum pipe pipe, u32 status_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	i915_reg_t reg = PIPESTAT(display, pipe);
	u32 enable_mask;

	drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&display->irq.lock);
	drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));

	if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	display->irq.pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(display, pipe);

	intel_de_write(display, reg, enable_mask | status_mask);
	intel_de_posting_read(display, reg);
}

void i915_disable_pipestat(struct intel_display *display,
			   enum pipe pipe, u32 status_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	i915_reg_t reg = PIPESTAT(display, pipe);
	u32 enable_mask;

	drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&display->irq.lock);
	drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));

	if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	display->irq.pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(display, pipe);

	intel_de_write(display, reg, enable_mask | status_mask);
	intel_de_posting_read(display, reg);
}

static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
{
	if (display->platform.i85x)
		return true;

	if (display->platform.pineview)
		return true;

	return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile;
}

/* enable ASLE pipestat for OpRegion */
static void i915_enable_asle_pipestat(struct intel_display *display)
{
	if (!intel_opregion_asle_present(display))
		return;

	if (!i915_has_legacy_blc_interrupt(display))
		return;

	spin_lock_irq(&display->irq.lock);

	i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(display) >= 4)
		i915_enable_pipestat(display, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&display->irq.lock);
}

#if IS_ENABLED(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct intel_display *display,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct intel_display *display,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct intel_display *display,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	spin_lock(&display->drm->event_lock);

	if (crtc->flip_done_event) {
		trace_intel_crtc_flip_done(crtc);
		drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
		crtc->flip_done_event = NULL;
	}

	spin_unlock(&display->drm->event_lock);
}

static void hsw_pipe_crc_irq_handler(struct intel_display *display,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(display, pipe,
				     intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct intel_display *display,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(display, pipe,
				     intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_de_read(display, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct intel_display *display,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(display) >= 3)
		res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
		res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(display, pipe,
				     intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)),
				     intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)),
				     intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct intel_display *display)
{
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		intel_de_write(display,
			       PIPESTAT(display, pipe),
			       PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);

		display->irq.pipestat_irq_mask[pipe] = 0;
	}
}

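/**
 * i9xx_pipestat_irq_ack - read and clear PIPESTAT interrupt status
 * @display: display device
 * @iir: master IIR value indicating which pipes have pending events
 * @pipe_stats: filled with the latched per-pipe PIPESTAT status bits
 *
 * Only the status bits we actually handle are latched; see the comment
 * in the body for why the enable bits are rewritten on every ack.
 */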
void i9xx_pipestat_irq_ack(struct intel_display *display,
			   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&display->irq.lock);

	if ((display->platform.valleyview || display->platform.cherryview) &&
	    !display->irq.vlv_display_irqs_enabled) {
		spin_unlock(&display->irq.lock);
		return;
	}

	for_each_pipe(display, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= display->irq.pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(display, pipe);
		pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(display, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_de_write(display, reg, pipe_stats[pipe]);
			intel_de_write(display, reg, enable_mask);
		}
	}
	spin_unlock(&display->irq.lock);
}

void i915_pipestat_irq_handler(struct intel_display *display,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(display, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(display, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(display);
}

void i965_pipestat_irq_handler(struct intel_display *display,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(display, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(display, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(display);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(display);
}

void valleyview_pipestat_irq_handler(struct intel_display *display,
				     u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(display, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(display, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(display, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(display);
}

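/* South display engine (PCH) interrupt handler for the IBX PCH. */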
static void ibx_irq_handler(struct intel_display *display, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(display, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);

		drm_dbg(display->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		intel_dp_aux_irq_handler(display);

	if (pch_iir & SDE_GMBUS)
		intel_gmbus_irq_handler(display);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(display->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(display->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(display->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(display, pipe)
			drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_de_read(display, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		drm_dbg(display->drm,
			"PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(display, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(display, PIPE_B);
}

static u32 ivb_err_int_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return ERR_INT_SPRITE_A_FAULT |
			ERR_INT_PRIMARY_A_FAULT |
			ERR_INT_CURSOR_A_FAULT;
	case PIPE_B:
		return ERR_INT_SPRITE_B_FAULT |
			ERR_INT_PRIMARY_B_FAULT |
			ERR_INT_CURSOR_B_FAULT;
	case PIPE_C:
		return ERR_INT_SPRITE_C_FAULT |
			ERR_INT_PRIMARY_C_FAULT |
			ERR_INT_CURSOR_C_FAULT;
	default:
		return 0;
	}
}

static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
	{ .fault = ERR_INT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = ERR_INT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = ERR_INT_SPRITE_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

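/*
 * GEN7_ERR_INT collects FIFO underrun, CRC done and pipe fault status;
 * the latched value is written back at the end to clear what we handled.
 */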
static void ivb_err_int_handler(struct intel_display *display)
{
	u32 err_int = intel_de_read(display, GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(display->drm, "Poison interrupt\n");

	if (err_int & ERR_INT_INVALID_GTT_PTE)
		drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");

	if (err_int & ERR_INT_INVALID_PTE_DATA)
		drm_err_ratelimited(display->drm, "Invalid PTE data\n");

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (display->platform.ivybridge)
				ivb_pipe_crc_irq_handler(display, pipe);
			else
				hsw_pipe_crc_irq_handler(display, pipe);
		}

		fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, ivb_pipe_fault_handlers,
						     pipe, fault_errors);
	}

	intel_de_write(display, GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct intel_display *display)
{
	u32 serr_int = intel_de_read(display, SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(display->drm, "PCH poison interrupt\n");

	for_each_pipe(display, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(display, pipe);

	intel_de_write(display, SERR_INT, serr_int);
}

static void cpt_irq_handler(struct intel_display *display, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(display, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);

		drm_dbg(display->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		intel_dp_aux_irq_handler(display);

	if (pch_iir & SDE_GMBUS_CPT)
		intel_gmbus_irq_handler(display);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(display->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(display->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(display, pipe)
			drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_de_read(display, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(display);
}

static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return GTT_FAULT_SPRITE_A_FAULT |
			GTT_FAULT_PRIMARY_A_FAULT |
			GTT_FAULT_CURSOR_A_FAULT;
	case PIPE_B:
		return GTT_FAULT_SPRITE_B_FAULT |
			GTT_FAULT_PRIMARY_B_FAULT |
			GTT_FAULT_CURSOR_B_FAULT;
	default:
		return 0;
	}
}

static const struct pipe_fault_handler ilk_pipe_fault_handlers[] = {
	{ .fault = GTT_FAULT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GTT_FAULT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GTT_FAULT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GTT_FAULT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GTT_FAULT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = GTT_FAULT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

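/*
 * Latch and clear ILK_GTT_FAULT up front, then report the per-pipe
 * faults from the snapshot.
 */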
static void ilk_gtt_fault_irq_handler(struct intel_display *display)
{
	enum pipe pipe;
	u32 gtt_fault;

	gtt_fault = intel_de_read(display, ILK_GTT_FAULT);
	intel_de_write(display, ILK_GTT_FAULT, gtt_fault);

	if (gtt_fault & GTT_FAULT_INVALID_GTT_PTE)
		drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");

	if (gtt_fault & GTT_FAULT_INVALID_PTE_DATA)
		drm_err_ratelimited(display->drm, "Invalid PTE data\n");

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		fault_errors = gtt_fault & ilk_gtt_fault_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, ilk_pipe_fault_handlers,
						     pipe, fault_errors);
	}
}

void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(display, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		intel_dp_aux_irq_handler(display);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(display);

	if (de_iir & DE_POISON)
		drm_err(display->drm, "Poison interrupt\n");

	if (de_iir & DE_GTT_FAULT)
		ilk_gtt_fault_irq_handler(display);

	for_each_pipe(display, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(display, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(display, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(display, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_de_read(display, SDEIIR);

		if (HAS_PCH_CPT(display))
			cpt_irq_handler(display, pch_iir);
		else
			ibx_irq_handler(display, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		intel_de_write(display, SDEIIR, pch_iir);
	}

	if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT)
		ilk_display_rps_irq_handler(display);
}

void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(display, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(display);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		struct intel_encoder *encoder;

		for_each_intel_encoder_with_psr(display->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
			u32 psr_iir;

			psr_iir = intel_de_rmw(display, EDP_PSR_IIR, 0, 0);
			intel_psr_irq_handler(intel_dp, psr_iir);
			break;
		}
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		intel_dp_aux_irq_handler(display);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(display);

	for_each_pipe(display, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(display, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(display, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(display) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_de_read(display, SDEIIR);

		cpt_irq_handler(display, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		intel_de_write(display, SDEIIR, pch_iir);
	}
}

static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
	u32 mask;

	if (DISPLAY_VER(display) >= 20)
		return 0;
	else if (DISPLAY_VER(display) >= 14)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB;
	else if (DISPLAY_VER(display) >= 13)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			XELPD_DE_PORT_AUX_DDID |
			XELPD_DE_PORT_AUX_DDIE |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4;
	else if (DISPLAY_VER(display) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(display) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(display) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}

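/*
 * The set of GEN8_DE_PIPE_IIR fault bits grows with the display
 * version: newer platforms add more planes plus DMC/ATS faults.
 */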
static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 20)
		return MTL_PLANE_ATS_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 14)
		return MTL_PIPEDMC_ATS_FAULT |
			MTL_PLANE_ATS_FAULT |
			GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 12)
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 11)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else
		return GEN8_PIPE_CURSOR_FAULT |
			GEN8_PIPE_SPRITE_FAULT |
			GEN8_PIPE_PRIMARY_FAULT;
}

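/*
 * ATS and PIPEDMC faults aren't tied to a specific plane, so there is
 * no plane register state to capture; just log which CRTC tripped them.
 */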
static bool handle_plane_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PLANE ATS fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

static bool handle_pipedmc_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PIPEDMC ATS fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

static bool handle_pipedmc_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PIPEDMC fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

static const struct pipe_fault_handler mtl_pipe_fault_handlers[] = {
	{ .fault = MTL_PLANE_ATS_FAULT, .handle = handle_plane_ats_fault, },
	{ .fault = MTL_PIPEDMC_ATS_FAULT, .handle = handle_pipedmc_ats_fault, },
	{ .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler tgl_pipe_fault_handlers[] = {
	{ .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler icl_pipe_fault_handlers[] = {
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler skl_pipe_fault_handlers[] = {
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler bdw_pipe_fault_handlers[] = {
	{ .fault = GEN8_PIPE_SPRITE_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GEN8_PIPE_PRIMARY_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GEN8_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler *
gen8_pipe_fault_handlers(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 14)
		return mtl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 12)
		return tgl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 11)
		return icl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 9)
		return skl_pipe_fault_handlers;
	else
		return bdw_pipe_fault_handlers;
}

static void intel_pmdemand_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->pmdemand.waitqueue);
}

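/*
 * DE MISC carries a grab bag of events: DBuf overlap detection,
 * pmdemand responses, register access timeouts, GSE/ASLE and PSR,
 * depending on the display version.
 */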
static void
gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
	bool found = false;

	if (HAS_DBUF_OVERLAP_DETECTION(display)) {
		if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) {
			drm_warn(display->drm, "DBuf overlap detected\n");
			found = true;
		}
	}

	if (DISPLAY_VER(display) >= 14) {
		if (iir & (XELPDP_PMDEMAND_RSP |
			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
				drm_dbg(display->drm,
					"Error waiting for Punit PM Demand Response\n");

			intel_pmdemand_irq_handler(display);
			found = true;
		}

		if (iir & XELPDP_RM_TIMEOUT) {
			u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);

			drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
			found = true;
		}
	} else if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(display);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(display->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			if (DISPLAY_VER(display) >= 12)
				iir_reg = TRANS_PSR_IIR(display,
							intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			psr_iir = intel_de_rmw(display, iir_reg, 0, 0);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior to GEN12 there is only one EDP PSR */
			if (DISPLAY_VER(display) < 12)
				break;
		}
	}

	if (!found)
		drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}

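/*
 * In DSI command mode the panel's TE (tearing effect) signal stands in
 * for vblank: map the TE to the right pipe and forward it as a vblank
 * event.
 */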
static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val;

	/*
	 * In case of dual link, TE comes from DSI_1;
	 * this is to check if dual link is enabled.
	 */
	val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * If dual link is enabled, then read DSI_0
	 * transcoder registers.
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
		PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(display->drm, "DSI transcoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(display->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(display, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}

static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(display, SDEIIR);
	if (!*pch_iir)
		return;

	/*
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		drm_WARN_ON(display->drm, INTEL_PCH_TYPE(display) < PCH_MTL);

		pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
		intel_de_write(display, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(display, SDEIIR, *pch_iir);

	if (pica_ier)
		intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}

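/*
 * Top level display engine interrupt handler: dispatches the DE MISC,
 * HPD, port, per-pipe and PCH interrupts indicated by the master
 * control bits.
 */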
void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_de_read(display, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_de_write(display, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(display, iir);
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_de_read(display, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_de_write(display, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(display, iir);
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_de_read(display, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_de_write(display, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(display)) {
				intel_dp_aux_irq_handler(display);
				found = true;
			}

			if (display->platform.geminilake || display->platform.broxton) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(display, hotplug_trigger);
					found = true;
				}
			} else if (display->platform.broadwell) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(display, hotplug_trigger);
					found = true;
				}
			}

			if ((display->platform.geminilake || display->platform.broxton) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(display);
				found = true;
			}

			if (DISPLAY_VER(display) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(display, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err_ratelimited(display->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE PIPE %c)!\n",
					    pipe_name(pipe));
			continue;
		}

		intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(display, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(display))
			flip_done_handler(display, pipe);

		if (HAS_DSB(display)) {
			if (iir & GEN12_DSB_INT(INTEL_DSB_0))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);

			if (iir & GEN12_DSB_INT(INTEL_DSB_1))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);

			if (iir & GEN12_DSB_INT(INTEL_DSB_2))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
		}

		if (HAS_PIPEDMC(display) && iir & GEN12_PIPEDMC_INTERRUPT)
			intel_pipedmc_irq_handler(display, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(display, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(display);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display,
						     gen8_pipe_fault_handlers(display),
						     pipe, fault_errors);
	}

	if (HAS_PCH_SPLIT(display) && !HAS_PCH_NOP(display) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		u32 pica_iir;

		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
		if (iir) {
			if (pica_iir)
				xelpdp_pica_irq_handler(display, pica_iir);

			if (INTEL_PCH_TYPE(display) >= PCH_ICP)
				icp_irq_handler(display, iir);
			else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
				spt_irq_handler(display, iir);
			else
				cpt_irq_handler(display, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(display->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}
}

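/*
 * Latch and ack GEN11_GU_MISC_IIR under an RPM assert block; the
 * returned value is handed to gen11_gu_misc_irq_handler() later.
 */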
u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
{
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	intel_display_rpm_assert_block(display);

	iir = intel_de_read(display, GEN11_GU_MISC_IIR);
	if (likely(iir))
		intel_de_write(display, GEN11_GU_MISC_IIR, iir);

	intel_display_rpm_assert_unblock(display);

	return iir;
}

void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(display);
}

void gen11_display_irq_handler(struct intel_display *display)
{
	u32 disp_ctl;

	intel_display_rpm_assert_block(display);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
	gen8_de_irq_handler(display, disp_ctl);
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	intel_display_rpm_assert_unblock(display);
}

static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
{
	lockdep_assert_held(&display->drm->vblank_time_lock);

	/*
	 * Vblank/CRC interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank/CRC interrupts are actually enabled.
	 */
	if (display->irq.vblank_enabled++ == 0)
		intel_de_write(display, SCPD0,
			       _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
{
	lockdep_assert_held(&display->drm->vblank_time_lock);

	if (--display->irq.vblank_enabled == 0)
		intel_de_write(display, SCPD0,
			       _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
{
	spin_lock_irq(&display->drm->vblank_time_lock);

	if (enable)
		i915gm_irq_cstate_wa_enable(display);
	else
		i915gm_irq_cstate_wa_disable(display);

	spin_unlock_irq(&display->drm->vblank_time_lock);
}

int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	return 0;
}

void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);
}

int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);

	i915gm_irq_cstate_wa_enable(display);

	return i8xx_enable_vblank(crtc);
}

void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);

	i8xx_disable_vblank(crtc);

	i915gm_irq_cstate_wa_disable(display);
}

int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_enable_pipestat(display, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	return 0;
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_disable_pipestat(display, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);
}

int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(display) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&display->irq.lock, irqflags);
	ilk_enable_display_irq(display, bit);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(display))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(display) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&display->irq.lock, irqflags);
	ilk_disable_display_irq(display, bit);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);
}

static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct intel_display *display = to_intel_display(intel_crtc);
	enum port port;

	if (!(intel_crtc->mode_flags &
	      (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

	intel_de_rmw(display, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
		     enable ? 0 : DSI_TE_EVENT);

	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);

	return true;
}

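/*
 * Worker to notify PSR of vblank enable count changes; scheduled from
 * bdw_enable_vblank()/bdw_disable_vblank() below.
 */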
static void intel_display_vblank_notify_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), irq.vblank_notify_work);
	int vblank_enable_count = READ_ONCE(display->irq.vblank_enable_count);

	intel_psr_notify_vblank_enable_disable(display, vblank_enable_count);
}

int bdw_enable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, true))
		return 0;

	if (crtc->vblank_psr_notify && display->irq.vblank_enable_count++ == 0)
		schedule_work(&display->irq.vblank_notify_work);

	spin_lock_irqsave(&display->irq.lock, irqflags);
	bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(display))
		drm_crtc_vblank_restore(&crtc->base);

	return 0;
}

void bdw_disable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, false))
		return;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	if (crtc->vblank_psr_notify && --display->irq.vblank_enable_count == 0)
		schedule_work(&display->irq.vblank_notify_work);
}

static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return SPRITEB_INVALID_GTT_STATUS |
			SPRITEA_INVALID_GTT_STATUS |
			PLANEA_INVALID_GTT_STATUS |
			CURSORA_INVALID_GTT_STATUS;
	case PIPE_B:
		return SPRITED_INVALID_GTT_STATUS |
			SPRITEC_INVALID_GTT_STATUS |
			PLANEB_INVALID_GTT_STATUS |
			CURSORB_INVALID_GTT_STATUS;
	case PIPE_C:
		return SPRITEF_INVALID_GTT_STATUS |
			SPRITEE_INVALID_GTT_STATUS |
			PLANEC_INVALID_GTT_STATUS |
			CURSORC_INVALID_GTT_STATUS;
	default:
		return 0;
	}
}

static const struct pipe_fault_handler vlv_pipe_fault_handlers[] = {
	{ .fault = SPRITEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = SPRITED_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = SPRITEF_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEE_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};


static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return SPRITEB_INVALID_GTT_STATUS |
			SPRITEA_INVALID_GTT_STATUS |
			PLANEA_INVALID_GTT_STATUS |
			CURSORA_INVALID_GTT_STATUS;
	case PIPE_B:
		return SPRITED_INVALID_GTT_STATUS |
			SPRITEC_INVALID_GTT_STATUS |
			PLANEB_INVALID_GTT_STATUS |
			CURSORB_INVALID_GTT_STATUS;
	case PIPE_C:
		return SPRITEF_INVALID_GTT_STATUS |
			SPRITEE_INVALID_GTT_STATUS |
			PLANEC_INVALID_GTT_STATUS |
			CURSORC_INVALID_GTT_STATUS;
	default:
		return 0;
	}
}

static const struct pipe_fault_handler vlv_pipe_fault_handlers[] = {
	{ .fault = SPRITEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = SPRITED_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = SPRITEF_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEE_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static void vlv_page_table_error_irq_ack(struct intel_display *display, u32 *dpinvgtt)
{
	u32 status, enable, tmp;

	tmp = intel_de_read(display, DPINVGTT);

	enable = tmp >> 16;
	status = tmp & 0xffff;

	/*
	 * Despite what the docs claim, the status bits seem to get
	 * stuck permanently (similar to the old PGTBL_ER register), so
	 * we have to disable and ignore them once set. They do get
	 * reset if the display power well goes down, so no need to
	 * track the enable mask explicitly.
	 */
	*dpinvgtt = status & enable;
	enable &= ~status;

	/* customary ack+disable then re-enable to guarantee an edge */
	intel_de_write(display, DPINVGTT, status);
	intel_de_write(display, DPINVGTT, enable << 16);
}

static void vlv_page_table_error_irq_handler(struct intel_display *display, u32 dpinvgtt)
{
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		fault_errors = dpinvgtt & vlv_dpinvgtt_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, vlv_pipe_fault_handlers,
						     pipe, fault_errors);
	}
}

void vlv_display_error_irq_ack(struct intel_display *display,
			       u32 *eir, u32 *dpinvgtt)
{
	u32 emr;

	*eir = intel_de_read(display, VLV_EIR);

	if (*eir & VLV_ERROR_PAGE_TABLE)
		vlv_page_table_error_irq_ack(display, dpinvgtt);

	intel_de_write(display, VLV_EIR, *eir);

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits.
	 */
	emr = intel_de_read(display, VLV_EMR);
	intel_de_write(display, VLV_EMR, 0xffffffff);
	intel_de_write(display, VLV_EMR, emr);
}
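
/*
 * Worked example for vlv_page_table_error_irq_ack() above (illustrative
 * numbers): DPINVGTT keeps the enable bits in the high half and the
 * status bits in the low half. Say the register reads 0x000f0005,
 * i.e. enable = 0x000f, status = 0x0005. Then:
 *
 *	*dpinvgtt = 0x0005 & 0x000f = 0x0005	faults to report
 *	enable   &= ~0x0005	    = 0x000a	stuck bits disabled
 *
 * The first write clears the status bits, the second re-arms only the
 * surviving enables (shifted back into the high half), providing the
 * ack+disable/re-enable edge mentioned in the comment.
 * vlv_display_error_irq_ack() plays the same edge trick with VLV_EMR.
 */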

void vlv_display_error_irq_handler(struct intel_display *display,
				   u32 eir, u32 dpinvgtt)
{
	drm_dbg(display->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir & VLV_ERROR_PAGE_TABLE)
		vlv_page_table_error_irq_handler(display, dpinvgtt);
}

static void _vlv_display_irq_reset(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	if (display->platform.cherryview)
		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	gen2_error_reset(to_intel_uncore(display->drm), VLV_ERROR_REGS);

	i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
	intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);

	i9xx_pipestat_irq_reset(display);

	intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

void vlv_display_irq_reset(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	if (display->irq.vlv_display_irqs_enabled)
		_vlv_display_irq_reset(display);
	spin_unlock_irq(&display->irq.lock);
}

void i9xx_display_irq_reset(struct intel_display *display)
{
	if (HAS_HOTPLUG(display)) {
		i915_hotplug_interrupt_update(display, 0xffffffff, 0);
		intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
	}

	i9xx_pipestat_irq_reset(display);
}

void i915_display_irq_postinstall(struct intel_display *display)
{
	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&display->irq.lock);
	i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&display->irq.lock);

	i915_enable_asle_pipestat(display);
}

void i965_display_irq_postinstall(struct intel_display *display)
{
	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&display->irq.lock);
	i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&display->irq.lock);

	i915_enable_asle_pipestat(display);
}
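
/*
 * A note on the masking convention used by _vlv_display_irq_postinstall()
 * below (illustrative sketch): the cached irq_mask is simply the
 * complement of the enable mask, e.g. with
 *
 *	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
 *		      I915_MASTER_ERROR_INTERRUPT;
 *
 * the hardware ends up with IMR = ~enable_mask and IER = enable_mask.
 * The ~0u that _vlv_display_irq_reset() stores in irq_mask doubles as
 * an "everything masked, nothing installed" sentinel, which the
 * drm_WARN_ON() in the postinstall cross-checks.
 */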

static u32 vlv_error_mask(void)
{
	/* TODO enable other errors too? */
	return VLV_ERROR_PAGE_TABLE;
}

static void _vlv_display_irq_postinstall(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	if (display->platform.cherryview)
		intel_de_write(display, DPINVGTT,
			       DPINVGTT_STATUS_MASK_CHV |
			       DPINVGTT_EN_MASK_CHV);
	else
		intel_de_write(display, DPINVGTT,
			       DPINVGTT_STATUS_MASK_VLV |
			       DPINVGTT_EN_MASK_VLV);

	gen2_error_init(to_intel_uncore(display->drm),
			VLV_ERROR_REGS, ~vlv_error_mask());

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(display, pipe)
		i915_enable_pipestat(display, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT;

	if (display->platform.cherryview)
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}

void vlv_display_irq_postinstall(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	if (display->irq.vlv_display_irqs_enabled)
		_vlv_display_irq_postinstall(display);
	spin_unlock_irq(&display->irq.lock);
}

void ibx_display_irq_reset(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (HAS_PCH_NOP(i915))
		return;

	gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);

	if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
		intel_de_write(display, SERR_INT, 0xffffffff);
}

void gen8_display_irq_reset(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	enum pipe pipe;

	if (!HAS_DISPLAY(display))
		return;

	intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
	intel_de_write(display, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(display, pipe)
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);

	if (HAS_PCH_SPLIT(i915))
		ibx_display_irq_reset(display);
}

void gen11_display_irq_reset(struct intel_display *display)
{
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if (!HAS_DISPLAY(display))
		return;

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);

	if (DISPLAY_VER(display) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(display, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(display, domain))
				continue;

			intel_de_write(display,
				       TRANS_PSR_IMR(display, trans),
				       0xffffffff);
			intel_de_write(display,
				       TRANS_PSR_IIR(display, trans),
				       0xffffffff);
		}
	} else {
		intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
		intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(display, pipe)
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);

	if (DISPLAY_VER(display) >= 14)
		intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
	else
		intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);

	if (INTEL_PCH_TYPE(display) >= PCH_ICP)
		intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}

void gen8_irq_power_well_post_enable(struct intel_display *display,
				     u8 pipe_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(display);
	enum pipe pipe;

	spin_lock_irq(&display->irq.lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&display->irq.lock);
		return;
	}

	for_each_pipe_masked(display, pipe, pipe_mask)
		intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
					    display->irq.de_irq_mask[pipe],
					    ~display->irq.de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&display->irq.lock);
}

void gen8_irq_power_well_pre_disable(struct intel_display *display,
				     u8 pipe_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum pipe pipe;

	spin_lock_irq(&display->irq.lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&display->irq.lock);
		return;
	}

	for_each_pipe_masked(display, pipe, pipe_mask)
		intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	spin_unlock_irq(&display->irq.lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}
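
/*
 * Usage sketch (illustrative): the display power well code is expected
 * to bracket pipe power well transitions with the two helpers above,
 * e.g. for a well feeding pipes B and C:
 *
 *	gen8_irq_power_well_post_enable(display, BIT(PIPE_B) | BIT(PIPE_C));
 *	...
 *	gen8_irq_power_well_pre_disable(display, BIT(PIPE_B) | BIT(PIPE_C));
 *
 * post_enable reprograms the pipe IMR/IER from the cached de_irq_mask
 * (plus the always-enabled extra_ier bits), since the register contents
 * were lost while the well was down; pre_disable resets them and waits
 * for any in-flight irq handler so nothing touches powered-down
 * registers.
 */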

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
static void ibx_irq_postinstall(struct intel_display *display)
{
	u32 mask;

	if (HAS_PCH_NOP(display))
		return;

	if (HAS_PCH_IBX(display))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}

void valleyview_enable_display_irqs(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	spin_lock_irq(&display->irq.lock);

	if (display->irq.vlv_display_irqs_enabled)
		goto out;

	display->irq.vlv_display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		_vlv_display_irq_reset(display);
		_vlv_display_irq_postinstall(display);
	}

out:
	spin_unlock_irq(&display->irq.lock);
}

void valleyview_disable_display_irqs(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	spin_lock_irq(&display->irq.lock);

	if (!display->irq.vlv_display_irqs_enabled)
		goto out;

	display->irq.vlv_display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		_vlv_display_irq_reset(display);
out:
	spin_unlock_irq(&display->irq.lock);
}

void ilk_de_irq_postinstall(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 display_mask, extra_mask;

	if (DISPLAY_VER(display) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE |
				DE_PCH_EVENT | DE_GTT_FAULT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (display->platform.haswell) {
		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (display->platform.ironlake && display->platform.mobile)
		extra_mask |= DE_PCU_EVENT;

	i915->irq_mask = ~display_mask;

	ibx_irq_postinstall(display);

	intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
				    display_mask | extra_mask);
}

static void mtp_irq_postinstall(struct intel_display *display);
static void icp_irq_postinstall(struct intel_display *display);
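
/*
 * A note on the *_masked vs *_enables naming in gen8_de_irq_postinstall()
 * below (illustrative): the *_masked values are the bits unmasked in IMR
 * right away, while the *_enables values additionally include bits like
 * GEN8_PIPE_VBLANK that are enabled in IER up front but kept masked in
 * IMR. Runtime control then only ever touches IMR, e.g.
 *
 *	bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
 *
 * from bdw_enable_vblank(), leaving IER untouched after postinstall.
 */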

void gen8_de_irq_postinstall(struct intel_display *display)
{
	u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(display);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(display))
		return;

	if (DISPLAY_VER(display) >= 14)
		mtp_irq_postinstall(display);
	else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
		icp_irq_postinstall(display);
	else if (HAS_PCH_SPLIT(display))
		ibx_irq_postinstall(display);

	if (DISPLAY_VER(display) < 11)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (display->platform.geminilake || display->platform.broxton)
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(display) >= 14) {
		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
			XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
	} else if (DISPLAY_VER(display) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(display, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	if (HAS_DBUF_OVERLAP_DETECTION(display))
		de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;

	if (HAS_DSB(display))
		de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
			GEN12_DSB_INT(INTEL_DSB_1) |
			GEN12_DSB_INT(INTEL_DSB_2);

	/* TODO figure PIPEDMC interrupts for pre-LNL */
	if (DISPLAY_VER(display) >= 20)
		de_pipe_masked |= GEN12_PIPEDMC_INTERRUPT;

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(display);

	de_port_enables = de_port_masked;
	if (display->platform.geminilake || display->platform.broxton)
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (display->platform.broadwell)
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(display) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(display, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(display, domain))
				continue;

			intel_display_irq_regs_assert_irr_is_zero(display,
								  TRANS_PSR_IIR(display, trans));
		}
	} else {
		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
	}

	for_each_pipe(display, pipe) {
		display->irq.de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
						    display->irq.de_irq_mask[pipe],
						    de_pipe_enables);
	}

	intel_display_irq_regs_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked,
				    de_port_enables);
	intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
				    de_misc_masked);

	if (IS_DISPLAY_VER(display, 11, 13)) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
			GEN11_DE_TBT_HOTPLUG_MASK;

		intel_display_irq_regs_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
					    de_hpd_enables);
	}
}

static void mtp_irq_postinstall(struct intel_display *display)
{
	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
		XELPDP_TBT_HOTPLUG_MASK;

	intel_display_irq_regs_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
				    de_hpd_enables);

	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}

static void icp_irq_postinstall(struct intel_display *display)
{
	u32 mask = SDE_GMBUS_ICP;

	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}

void gen11_de_irq_postinstall(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	gen8_de_irq_postinstall(display);

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}

void dg1_de_irq_postinstall(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	gen8_de_irq_postinstall(display);

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}

void intel_display_irq_init(struct intel_display *display)
{
	spin_lock_init(&display->irq.lock);

	display->drm->vblank_disable_immediate = true;

	intel_hotplug_irq_init(display);

	INIT_WORK(&display->irq.vblank_notify_work,
		  intel_display_vblank_notify_work);
}

struct intel_display_irq_snapshot {
	u32 derrmr;
};

struct intel_display_irq_snapshot *
intel_display_irq_snapshot_capture(struct intel_display *display)
{
	struct intel_display_irq_snapshot *snapshot;

	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
	if (!snapshot)
		return NULL;

	if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display))
		snapshot->derrmr = intel_de_read(display, DERRMR);

	return snapshot;
}

void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot,
				      struct drm_printer *p)
{
	if (!snapshot)
		return;

	drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr);
}
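
/*
 * Usage sketch for the snapshot helpers above (illustrative; the real
 * consumer is the error capture code):
 *
 *	struct intel_display_irq_snapshot *snapshot;
 *
 *	snapshot = intel_display_irq_snapshot_capture(display);
 *	...
 *	intel_display_irq_snapshot_print(snapshot, p);
 *	kfree(snapshot);
 *
 * capture uses GFP_ATOMIC so it is safe from atomic error capture
 * context, and print tolerates a NULL snapshot, so a failed allocation
 * needs no special handling at the call site.
 */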