// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include "i915_reg.h"
#include "icl_dsi_regs.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_parent.h"
#include "intel_pipe_crc_regs.h"
#include "intel_plane.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"

/*
 * Reset an IMR/IER/IIR interrupt register triplet to a quiescent state:
 * mask everything, disable all enables, then clear any pending IIR bits.
 * Posting reads are used to flush each write to the hardware before the
 * next step.
 */
static void irq_reset(struct intel_display *display, struct i915_irq_regs regs)
{
	/* Mask all interrupts first so nothing new can fire while clearing. */
	intel_de_write(display, regs.imr, 0xffffffff);
	intel_de_posting_read(display, regs.imr);

	intel_de_write(display, regs.ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_de_write(display, regs.iir, 0xffffffff);
	intel_de_posting_read(display, regs.iir);
	intel_de_write(display, regs.iir, 0xffffffff);
	intel_de_posting_read(display, regs.iir);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
49 */ 50 static void assert_iir_is_zero(struct intel_display *display, i915_reg_t reg) 51 { 52 u32 val = intel_de_read(display, reg); 53 54 if (val == 0) 55 return; 56 57 drm_WARN(display->drm, 1, 58 "Interrupt register 0x%x is not zero: 0x%08x\n", 59 i915_mmio_reg_offset(reg), val); 60 intel_de_write(display, reg, 0xffffffff); 61 intel_de_posting_read(display, reg); 62 intel_de_write(display, reg, 0xffffffff); 63 intel_de_posting_read(display, reg); 64 } 65 66 static void irq_init(struct intel_display *display, struct i915_irq_regs regs, 67 u32 imr_val, u32 ier_val) 68 { 69 assert_iir_is_zero(display, regs.iir); 70 71 intel_de_write(display, regs.ier, ier_val); 72 intel_de_write(display, regs.imr, imr_val); 73 intel_de_posting_read(display, regs.imr); 74 } 75 76 static void error_reset(struct intel_display *display, struct i915_error_regs regs) 77 { 78 intel_de_write(display, regs.emr, 0xffffffff); 79 intel_de_posting_read(display, regs.emr); 80 81 intel_de_write(display, regs.eir, 0xffffffff); 82 intel_de_posting_read(display, regs.eir); 83 intel_de_write(display, regs.eir, 0xffffffff); 84 intel_de_posting_read(display, regs.eir); 85 } 86 87 static void error_init(struct intel_display *display, struct i915_error_regs regs, 88 u32 emr_val) 89 { 90 intel_de_write(display, regs.eir, 0xffffffff); 91 intel_de_posting_read(display, regs.eir); 92 intel_de_write(display, regs.eir, 0xffffffff); 93 intel_de_posting_read(display, regs.eir); 94 95 intel_de_write(display, regs.emr, emr_val); 96 intel_de_posting_read(display, regs.emr); 97 } 98 99 struct pipe_fault_handler { 100 bool (*handle)(struct intel_crtc *crtc, enum plane_id plane_id); 101 u32 fault; 102 enum plane_id plane_id; 103 }; 104 105 static bool handle_plane_fault(struct intel_crtc *crtc, enum plane_id plane_id) 106 { 107 struct intel_display *display = to_intel_display(crtc); 108 struct intel_plane_error error = {}; 109 struct intel_plane *plane; 110 111 plane = intel_crtc_get_plane(crtc, plane_id); 112 if 
(!plane || !plane->capture_error) 113 return false; 114 115 plane->capture_error(crtc, plane, &error); 116 117 drm_err_ratelimited(display->drm, 118 "[CRTC:%d:%s][PLANE:%d:%s] fault (CTL=0x%x, SURF=0x%x, SURFLIVE=0x%x)\n", 119 crtc->base.base.id, crtc->base.name, 120 plane->base.base.id, plane->base.name, 121 error.ctl, error.surf, error.surflive); 122 123 return true; 124 } 125 126 static void intel_pipe_fault_irq_handler(struct intel_display *display, 127 const struct pipe_fault_handler *handlers, 128 enum pipe pipe, u32 fault_errors) 129 { 130 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 131 const struct pipe_fault_handler *handler; 132 133 for (handler = handlers; handler && handler->fault; handler++) { 134 if ((fault_errors & handler->fault) == 0) 135 continue; 136 137 if (handler->handle(crtc, handler->plane_id)) 138 fault_errors &= ~handler->fault; 139 } 140 141 WARN_ONCE(fault_errors, "[CRTC:%d:%s] unreported faults 0x%x\n", 142 crtc->base.base.id, crtc->base.name, fault_errors); 143 } 144 145 static void 146 intel_handle_vblank(struct intel_display *display, enum pipe pipe) 147 { 148 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 149 150 drm_crtc_handle_vblank(&crtc->base); 151 } 152 153 /** 154 * ilk_update_display_irq - update DEIMR 155 * @display: display device 156 * @interrupt_mask: mask of interrupt bits to update 157 * @enabled_irq_mask: mask of interrupt bits to enable 158 */ 159 void ilk_update_display_irq(struct intel_display *display, 160 u32 interrupt_mask, u32 enabled_irq_mask) 161 { 162 u32 new_val; 163 164 lockdep_assert_held(&display->irq.lock); 165 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 166 167 new_val = display->irq.ilk_de_imr_mask; 168 new_val &= ~interrupt_mask; 169 new_val |= (~enabled_irq_mask & interrupt_mask); 170 171 if (new_val != display->irq.ilk_de_imr_mask && 172 !drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display))) { 173 display->irq.ilk_de_imr_mask = new_val; 
174 intel_de_write(display, DEIMR, display->irq.ilk_de_imr_mask); 175 intel_de_posting_read(display, DEIMR); 176 } 177 } 178 179 void ilk_enable_display_irq(struct intel_display *display, u32 bits) 180 { 181 ilk_update_display_irq(display, bits, bits); 182 } 183 184 void ilk_disable_display_irq(struct intel_display *display, u32 bits) 185 { 186 ilk_update_display_irq(display, bits, 0); 187 } 188 189 /** 190 * bdw_update_port_irq - update DE port interrupt 191 * @display: display device 192 * @interrupt_mask: mask of interrupt bits to update 193 * @enabled_irq_mask: mask of interrupt bits to enable 194 */ 195 void bdw_update_port_irq(struct intel_display *display, 196 u32 interrupt_mask, u32 enabled_irq_mask) 197 { 198 u32 new_val; 199 u32 old_val; 200 201 lockdep_assert_held(&display->irq.lock); 202 203 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 204 205 if (drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display))) 206 return; 207 208 old_val = intel_de_read(display, GEN8_DE_PORT_IMR); 209 210 new_val = old_val; 211 new_val &= ~interrupt_mask; 212 new_val |= (~enabled_irq_mask & interrupt_mask); 213 214 if (new_val != old_val) { 215 intel_de_write(display, GEN8_DE_PORT_IMR, new_val); 216 intel_de_posting_read(display, GEN8_DE_PORT_IMR); 217 } 218 } 219 220 /** 221 * bdw_update_pipe_irq - update DE pipe interrupt 222 * @display: display device 223 * @pipe: pipe whose interrupt to update 224 * @interrupt_mask: mask of interrupt bits to update 225 * @enabled_irq_mask: mask of interrupt bits to enable 226 */ 227 static void bdw_update_pipe_irq(struct intel_display *display, 228 enum pipe pipe, u32 interrupt_mask, 229 u32 enabled_irq_mask) 230 { 231 u32 new_val; 232 233 lockdep_assert_held(&display->irq.lock); 234 235 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 236 237 if (drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display))) 238 return; 239 240 new_val = display->irq.de_pipe_imr_mask[pipe]; 241 new_val &= 
~interrupt_mask; 242 new_val |= (~enabled_irq_mask & interrupt_mask); 243 244 if (new_val != display->irq.de_pipe_imr_mask[pipe]) { 245 display->irq.de_pipe_imr_mask[pipe] = new_val; 246 intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_pipe_imr_mask[pipe]); 247 intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe)); 248 } 249 } 250 251 void bdw_enable_pipe_irq(struct intel_display *display, 252 enum pipe pipe, u32 bits) 253 { 254 bdw_update_pipe_irq(display, pipe, bits, bits); 255 } 256 257 void bdw_disable_pipe_irq(struct intel_display *display, 258 enum pipe pipe, u32 bits) 259 { 260 bdw_update_pipe_irq(display, pipe, bits, 0); 261 } 262 263 /** 264 * ibx_display_interrupt_update - update SDEIMR 265 * @display: display device 266 * @interrupt_mask: mask of interrupt bits to update 267 * @enabled_irq_mask: mask of interrupt bits to enable 268 */ 269 void ibx_display_interrupt_update(struct intel_display *display, 270 u32 interrupt_mask, 271 u32 enabled_irq_mask) 272 { 273 u32 sdeimr = intel_de_read(display, SDEIMR); 274 275 sdeimr &= ~interrupt_mask; 276 sdeimr |= (~enabled_irq_mask & interrupt_mask); 277 278 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 279 280 lockdep_assert_held(&display->irq.lock); 281 282 if (drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display))) 283 return; 284 285 intel_de_write(display, SDEIMR, sdeimr); 286 intel_de_posting_read(display, SDEIMR); 287 } 288 289 void ibx_enable_display_interrupt(struct intel_display *display, u32 bits) 290 { 291 ibx_display_interrupt_update(display, bits, bits); 292 } 293 294 void ibx_disable_display_interrupt(struct intel_display *display, u32 bits) 295 { 296 ibx_display_interrupt_update(display, bits, 0); 297 } 298 299 u32 i915_pipestat_enable_mask(struct intel_display *display, 300 enum pipe pipe) 301 { 302 u32 status_mask = display->irq.pipestat_irq_mask[pipe]; 303 u32 enable_mask = status_mask << 16; 304 305 lockdep_assert_held(&display->irq.lock); 306 307 if 
(DISPLAY_VER(display) < 5) 308 goto out; 309 310 /* 311 * On pipe A we don't support the PSR interrupt yet, 312 * on pipe B and C the same bit MBZ. 313 */ 314 if (drm_WARN_ON_ONCE(display->drm, 315 status_mask & PIPE_A_PSR_STATUS_VLV)) 316 return 0; 317 /* 318 * On pipe B and C we don't support the PSR interrupt yet, on pipe 319 * A the same bit is for perf counters which we don't use either. 320 */ 321 if (drm_WARN_ON_ONCE(display->drm, 322 status_mask & PIPE_B_PSR_STATUS_VLV)) 323 return 0; 324 325 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 326 SPRITE0_FLIP_DONE_INT_EN_VLV | 327 SPRITE1_FLIP_DONE_INT_EN_VLV); 328 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 329 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 330 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 331 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 332 333 out: 334 drm_WARN_ONCE(display->drm, 335 enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 336 status_mask & ~PIPESTAT_INT_STATUS_MASK, 337 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 338 pipe_name(pipe), enable_mask, status_mask); 339 340 return enable_mask; 341 } 342 343 void i915_enable_pipestat(struct intel_display *display, 344 enum pipe pipe, u32 status_mask) 345 { 346 i915_reg_t reg = PIPESTAT(display, pipe); 347 u32 enable_mask; 348 349 drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 350 "pipe %c: status_mask=0x%x\n", 351 pipe_name(pipe), status_mask); 352 353 lockdep_assert_held(&display->irq.lock); 354 drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display)); 355 356 if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask) 357 return; 358 359 display->irq.pipestat_irq_mask[pipe] |= status_mask; 360 enable_mask = i915_pipestat_enable_mask(display, pipe); 361 362 intel_de_write(display, reg, enable_mask | status_mask); 363 intel_de_posting_read(display, reg); 364 } 365 366 void i915_disable_pipestat(struct intel_display *display, 367 enum pipe pipe, u32 status_mask) 368 { 369 i915_reg_t reg = 
PIPESTAT(display, pipe); 370 u32 enable_mask; 371 372 drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 373 "pipe %c: status_mask=0x%x\n", 374 pipe_name(pipe), status_mask); 375 376 lockdep_assert_held(&display->irq.lock); 377 drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display)); 378 379 if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0) 380 return; 381 382 display->irq.pipestat_irq_mask[pipe] &= ~status_mask; 383 enable_mask = i915_pipestat_enable_mask(display, pipe); 384 385 intel_de_write(display, reg, enable_mask | status_mask); 386 intel_de_posting_read(display, reg); 387 } 388 389 static bool i915_has_legacy_blc_interrupt(struct intel_display *display) 390 { 391 if (display->platform.i85x) 392 return true; 393 394 if (display->platform.pineview) 395 return true; 396 397 return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile; 398 } 399 400 /* enable ASLE pipestat for OpRegion */ 401 static void i915_enable_asle_pipestat(struct intel_display *display) 402 { 403 if (!intel_opregion_asle_present(display)) 404 return; 405 406 if (!i915_has_legacy_blc_interrupt(display)) 407 return; 408 409 spin_lock_irq(&display->irq.lock); 410 411 i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 412 if (DISPLAY_VER(display) >= 4) 413 i915_enable_pipestat(display, PIPE_A, 414 PIPE_LEGACY_BLC_EVENT_STATUS); 415 416 spin_unlock_irq(&display->irq.lock); 417 } 418 419 #if IS_ENABLED(CONFIG_DEBUG_FS) 420 static void display_pipe_crc_irq_handler(struct intel_display *display, 421 enum pipe pipe, 422 u32 crc0, u32 crc1, 423 u32 crc2, u32 crc3, 424 u32 crc4) 425 { 426 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 427 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; 428 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; 429 430 trace_intel_pipe_crc(crtc, crcs); 431 432 spin_lock(&pipe_crc->lock); 433 /* 434 * For some not yet identified reason, the first CRC is 435 * bonkers. 
So let's just wait for the next vblank and read 436 * out the buggy result. 437 * 438 * On GEN8+ sometimes the second CRC is bonkers as well, so 439 * don't trust that one either. 440 */ 441 if (pipe_crc->skipped <= 0 || 442 (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) { 443 pipe_crc->skipped++; 444 spin_unlock(&pipe_crc->lock); 445 return; 446 } 447 spin_unlock(&pipe_crc->lock); 448 449 drm_crtc_add_crc_entry(&crtc->base, true, 450 drm_crtc_accurate_vblank_count(&crtc->base), 451 crcs); 452 } 453 #else 454 static inline void 455 display_pipe_crc_irq_handler(struct intel_display *display, 456 enum pipe pipe, 457 u32 crc0, u32 crc1, 458 u32 crc2, u32 crc3, 459 u32 crc4) {} 460 #endif 461 462 static void flip_done_handler(struct intel_display *display, 463 enum pipe pipe) 464 { 465 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 466 467 spin_lock(&display->drm->event_lock); 468 469 if (crtc->flip_done_event) { 470 trace_intel_crtc_flip_done(crtc); 471 drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event); 472 crtc->flip_done_event = NULL; 473 } 474 475 spin_unlock(&display->drm->event_lock); 476 } 477 478 static void hsw_pipe_crc_irq_handler(struct intel_display *display, 479 enum pipe pipe) 480 { 481 display_pipe_crc_irq_handler(display, pipe, 482 intel_de_read(display, PIPE_CRC_RES_HSW(pipe)), 483 0, 0, 0, 0); 484 } 485 486 static void ivb_pipe_crc_irq_handler(struct intel_display *display, 487 enum pipe pipe) 488 { 489 display_pipe_crc_irq_handler(display, pipe, 490 intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)), 491 intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)), 492 intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)), 493 intel_de_read(display, PIPE_CRC_RES_4_IVB(pipe)), 494 intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe))); 495 } 496 497 static void i9xx_pipe_crc_irq_handler(struct intel_display *display, 498 enum pipe pipe) 499 { 500 u32 res1, res2; 501 502 if (DISPLAY_VER(display) >= 3) 503 res1 = intel_de_read(display, 
PIPE_CRC_RES_RES1_I915(display, pipe)); 504 else 505 res1 = 0; 506 507 if (DISPLAY_VER(display) >= 5 || display->platform.g4x) 508 res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe)); 509 else 510 res2 = 0; 511 512 display_pipe_crc_irq_handler(display, pipe, 513 intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)), 514 intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)), 515 intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)), 516 res1, res2); 517 } 518 519 static void i9xx_pipestat_irq_reset(struct intel_display *display) 520 { 521 enum pipe pipe; 522 523 for_each_pipe(display, pipe) { 524 intel_de_write(display, 525 PIPESTAT(display, pipe), 526 PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS); 527 528 display->irq.pipestat_irq_mask[pipe] = 0; 529 } 530 } 531 532 void i9xx_pipestat_irq_ack(struct intel_display *display, 533 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 534 { 535 enum pipe pipe; 536 537 spin_lock(&display->irq.lock); 538 539 if ((display->platform.valleyview || display->platform.cherryview) && 540 !display->irq.vlv_display_irqs_enabled) { 541 spin_unlock(&display->irq.lock); 542 return; 543 } 544 545 for_each_pipe(display, pipe) { 546 i915_reg_t reg; 547 u32 status_mask, enable_mask, iir_bit = 0; 548 549 /* 550 * PIPESTAT bits get signalled even when the interrupt is 551 * disabled with the mask bits, and some of the status bits do 552 * not generate interrupts at all (like the underrun bit). Hence 553 * we need to be careful that we only handle what we want to 554 * handle. 555 */ 556 557 /* fifo underruns are filterered in the underrun handler. 
*/ 558 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 559 560 switch (pipe) { 561 default: 562 case PIPE_A: 563 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 564 break; 565 case PIPE_B: 566 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 567 break; 568 case PIPE_C: 569 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 570 break; 571 } 572 if (iir & iir_bit) 573 status_mask |= display->irq.pipestat_irq_mask[pipe]; 574 575 if (!status_mask) 576 continue; 577 578 reg = PIPESTAT(display, pipe); 579 pipe_stats[pipe] = intel_de_read(display, reg) & status_mask; 580 enable_mask = i915_pipestat_enable_mask(display, pipe); 581 582 /* 583 * Clear the PIPE*STAT regs before the IIR 584 * 585 * Toggle the enable bits to make sure we get an 586 * edge in the ISR pipe event bit if we don't clear 587 * all the enabled status bits. Otherwise the edge 588 * triggered IIR on i965/g4x wouldn't notice that 589 * an interrupt is still pending. 590 */ 591 if (pipe_stats[pipe]) { 592 intel_de_write(display, reg, pipe_stats[pipe]); 593 intel_de_write(display, reg, enable_mask); 594 } 595 } 596 spin_unlock(&display->irq.lock); 597 } 598 599 void i915_pipestat_irq_handler(struct intel_display *display, 600 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 601 { 602 bool blc_event = false; 603 enum pipe pipe; 604 605 for_each_pipe(display, pipe) { 606 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 607 intel_handle_vblank(display, pipe); 608 609 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 610 blc_event = true; 611 612 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 613 i9xx_pipe_crc_irq_handler(display, pipe); 614 615 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 616 intel_cpu_fifo_underrun_irq_handler(display, pipe); 617 } 618 619 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 620 intel_opregion_asle_intr(display); 621 } 622 623 void i965_pipestat_irq_handler(struct intel_display *display, 624 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 625 { 626 bool blc_event = false; 627 enum pipe 
pipe; 628 629 for_each_pipe(display, pipe) { 630 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 631 intel_handle_vblank(display, pipe); 632 633 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 634 blc_event = true; 635 636 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 637 i9xx_pipe_crc_irq_handler(display, pipe); 638 639 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 640 intel_cpu_fifo_underrun_irq_handler(display, pipe); 641 } 642 643 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 644 intel_opregion_asle_intr(display); 645 646 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 647 intel_gmbus_irq_handler(display); 648 } 649 650 void valleyview_pipestat_irq_handler(struct intel_display *display, 651 u32 pipe_stats[I915_MAX_PIPES]) 652 { 653 enum pipe pipe; 654 655 for_each_pipe(display, pipe) { 656 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 657 intel_handle_vblank(display, pipe); 658 659 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 660 flip_done_handler(display, pipe); 661 662 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 663 i9xx_pipe_crc_irq_handler(display, pipe); 664 665 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 666 intel_cpu_fifo_underrun_irq_handler(display, pipe); 667 } 668 669 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 670 intel_gmbus_irq_handler(display); 671 } 672 673 static void ibx_irq_handler(struct intel_display *display, u32 pch_iir) 674 { 675 enum pipe pipe; 676 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 677 678 ibx_hpd_irq_handler(display, hotplug_trigger); 679 680 if (pch_iir & SDE_AUDIO_POWER_MASK) { 681 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 682 SDE_AUDIO_POWER_SHIFT); 683 drm_dbg(display->drm, "PCH audio power change on port %d\n", 684 port_name(port)); 685 } 686 687 if (pch_iir & SDE_AUX_MASK) 688 intel_dp_aux_irq_handler(display); 689 690 if (pch_iir & SDE_GMBUS) 691 intel_gmbus_irq_handler(display); 692 693 if (pch_iir & SDE_AUDIO_HDCP_MASK) 694 
drm_dbg(display->drm, "PCH HDCP audio interrupt\n"); 695 696 if (pch_iir & SDE_AUDIO_TRANS_MASK) 697 drm_dbg(display->drm, "PCH transcoder audio interrupt\n"); 698 699 if (pch_iir & SDE_POISON) 700 drm_err(display->drm, "PCH poison interrupt\n"); 701 702 if (pch_iir & SDE_FDI_MASK) { 703 for_each_pipe(display, pipe) 704 drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n", 705 pipe_name(pipe), 706 intel_de_read(display, FDI_RX_IIR(pipe))); 707 } 708 709 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 710 drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n"); 711 712 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 713 drm_dbg(display->drm, 714 "PCH transcoder CRC error interrupt\n"); 715 716 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 717 intel_pch_fifo_underrun_irq_handler(display, PIPE_A); 718 719 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 720 intel_pch_fifo_underrun_irq_handler(display, PIPE_B); 721 } 722 723 static u32 ivb_err_int_pipe_fault_mask(enum pipe pipe) 724 { 725 switch (pipe) { 726 case PIPE_A: 727 return ERR_INT_SPRITE_A_FAULT | 728 ERR_INT_PRIMARY_A_FAULT | 729 ERR_INT_CURSOR_A_FAULT; 730 case PIPE_B: 731 return ERR_INT_SPRITE_B_FAULT | 732 ERR_INT_PRIMARY_B_FAULT | 733 ERR_INT_CURSOR_B_FAULT; 734 case PIPE_C: 735 return ERR_INT_SPRITE_C_FAULT | 736 ERR_INT_PRIMARY_C_FAULT | 737 ERR_INT_CURSOR_C_FAULT; 738 default: 739 return 0; 740 } 741 } 742 743 static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = { 744 { .fault = ERR_INT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 745 { .fault = ERR_INT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 746 { .fault = ERR_INT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 747 { .fault = ERR_INT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 748 { .fault = ERR_INT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 749 { .fault = 
ERR_INT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 750 { .fault = ERR_INT_SPRITE_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 751 { .fault = ERR_INT_PRIMARY_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 752 { .fault = ERR_INT_CURSOR_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 753 {} 754 }; 755 756 static void ivb_err_int_handler(struct intel_display *display) 757 { 758 u32 err_int = intel_de_read(display, GEN7_ERR_INT); 759 enum pipe pipe; 760 761 if (err_int & ERR_INT_POISON) 762 drm_err(display->drm, "Poison interrupt\n"); 763 764 if (err_int & ERR_INT_INVALID_GTT_PTE) 765 drm_err_ratelimited(display->drm, "Invalid GTT PTE\n"); 766 767 if (err_int & ERR_INT_INVALID_PTE_DATA) 768 drm_err_ratelimited(display->drm, "Invalid PTE data\n"); 769 770 for_each_pipe(display, pipe) { 771 u32 fault_errors; 772 773 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 774 intel_cpu_fifo_underrun_irq_handler(display, pipe); 775 776 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 777 if (display->platform.ivybridge) 778 ivb_pipe_crc_irq_handler(display, pipe); 779 else 780 hsw_pipe_crc_irq_handler(display, pipe); 781 } 782 783 fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe); 784 if (fault_errors) 785 intel_pipe_fault_irq_handler(display, ivb_pipe_fault_handlers, 786 pipe, fault_errors); 787 } 788 789 intel_de_write(display, GEN7_ERR_INT, err_int); 790 } 791 792 static void cpt_serr_int_handler(struct intel_display *display) 793 { 794 u32 serr_int = intel_de_read(display, SERR_INT); 795 enum pipe pipe; 796 797 if (serr_int & SERR_INT_POISON) 798 drm_err(display->drm, "PCH poison interrupt\n"); 799 800 for_each_pipe(display, pipe) 801 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 802 intel_pch_fifo_underrun_irq_handler(display, pipe); 803 804 intel_de_write(display, SERR_INT, serr_int); 805 } 806 807 static void cpt_irq_handler(struct intel_display *display, u32 pch_iir) 808 { 
809 enum pipe pipe; 810 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 811 812 ibx_hpd_irq_handler(display, hotplug_trigger); 813 814 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 815 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 816 SDE_AUDIO_POWER_SHIFT_CPT); 817 drm_dbg(display->drm, "PCH audio power change on port %c\n", 818 port_name(port)); 819 } 820 821 if (pch_iir & SDE_AUX_MASK_CPT) 822 intel_dp_aux_irq_handler(display); 823 824 if (pch_iir & SDE_GMBUS_CPT) 825 intel_gmbus_irq_handler(display); 826 827 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 828 drm_dbg(display->drm, "Audio CP request interrupt\n"); 829 830 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 831 drm_dbg(display->drm, "Audio CP change interrupt\n"); 832 833 if (pch_iir & SDE_FDI_MASK_CPT) { 834 for_each_pipe(display, pipe) 835 drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n", 836 pipe_name(pipe), 837 intel_de_read(display, FDI_RX_IIR(pipe))); 838 } 839 840 if (pch_iir & SDE_ERROR_CPT) 841 cpt_serr_int_handler(display); 842 } 843 844 static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe) 845 { 846 switch (pipe) { 847 case PIPE_A: 848 return GTT_FAULT_SPRITE_A_FAULT | 849 GTT_FAULT_PRIMARY_A_FAULT | 850 GTT_FAULT_CURSOR_A_FAULT; 851 case PIPE_B: 852 return GTT_FAULT_SPRITE_B_FAULT | 853 GTT_FAULT_PRIMARY_B_FAULT | 854 GTT_FAULT_CURSOR_B_FAULT; 855 default: 856 return 0; 857 } 858 } 859 860 static const struct pipe_fault_handler ilk_pipe_fault_handlers[] = { 861 { .fault = GTT_FAULT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 862 { .fault = GTT_FAULT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 863 { .fault = GTT_FAULT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 864 { .fault = GTT_FAULT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 865 { .fault = GTT_FAULT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 866 { .fault = GTT_FAULT_CURSOR_B_FAULT, 
.handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 867 {} 868 }; 869 870 static void ilk_gtt_fault_irq_handler(struct intel_display *display) 871 { 872 enum pipe pipe; 873 u32 gtt_fault; 874 875 gtt_fault = intel_de_read(display, ILK_GTT_FAULT); 876 intel_de_write(display, ILK_GTT_FAULT, gtt_fault); 877 878 if (gtt_fault & GTT_FAULT_INVALID_GTT_PTE) 879 drm_err_ratelimited(display->drm, "Invalid GTT PTE\n"); 880 881 if (gtt_fault & GTT_FAULT_INVALID_PTE_DATA) 882 drm_err_ratelimited(display->drm, "Invalid PTE data\n"); 883 884 for_each_pipe(display, pipe) { 885 u32 fault_errors; 886 887 fault_errors = gtt_fault & ilk_gtt_fault_pipe_fault_mask(pipe); 888 if (fault_errors) 889 intel_pipe_fault_irq_handler(display, ilk_pipe_fault_handlers, 890 pipe, fault_errors); 891 } 892 } 893 894 static void _ilk_display_irq_handler(struct intel_display *display, u32 de_iir) 895 { 896 enum pipe pipe; 897 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 898 899 if (hotplug_trigger) 900 ilk_hpd_irq_handler(display, hotplug_trigger); 901 902 if (de_iir & DE_AUX_CHANNEL_A) 903 intel_dp_aux_irq_handler(display); 904 905 if (de_iir & DE_GSE) 906 intel_opregion_asle_intr(display); 907 908 if (de_iir & DE_POISON) 909 drm_err(display->drm, "Poison interrupt\n"); 910 911 if (de_iir & DE_GTT_FAULT) 912 ilk_gtt_fault_irq_handler(display); 913 914 for_each_pipe(display, pipe) { 915 if (de_iir & DE_PIPE_VBLANK(pipe)) 916 intel_handle_vblank(display, pipe); 917 918 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 919 flip_done_handler(display, pipe); 920 921 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 922 intel_cpu_fifo_underrun_irq_handler(display, pipe); 923 924 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 925 i9xx_pipe_crc_irq_handler(display, pipe); 926 } 927 928 /* check event from PCH */ 929 if (de_iir & DE_PCH_EVENT) { 930 u32 pch_iir = intel_de_read(display, SDEIIR); 931 932 if (HAS_PCH_CPT(display)) 933 cpt_irq_handler(display, pch_iir); 934 else 935 ibx_irq_handler(display, pch_iir); 936 937 /* 
should clear PCH hotplug event before clear CPU irq */ 938 intel_de_write(display, SDEIIR, pch_iir); 939 } 940 941 if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT) 942 ilk_display_rps_irq_handler(display); 943 } 944 945 static void _ivb_display_irq_handler(struct intel_display *display, u32 de_iir) 946 { 947 enum pipe pipe; 948 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 949 950 if (hotplug_trigger) 951 ilk_hpd_irq_handler(display, hotplug_trigger); 952 953 if (de_iir & DE_ERR_INT_IVB) 954 ivb_err_int_handler(display); 955 956 if (de_iir & DE_EDP_PSR_INT_HSW) { 957 struct intel_encoder *encoder; 958 959 for_each_intel_encoder_with_psr(display->drm, encoder) { 960 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 961 u32 psr_iir; 962 963 psr_iir = intel_de_rmw(display, EDP_PSR_IIR, 0, 0); 964 intel_psr_irq_handler(intel_dp, psr_iir); 965 break; 966 } 967 } 968 969 if (de_iir & DE_AUX_CHANNEL_A_IVB) 970 intel_dp_aux_irq_handler(display); 971 972 if (de_iir & DE_GSE_IVB) 973 intel_opregion_asle_intr(display); 974 975 for_each_pipe(display, pipe) { 976 if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) 977 intel_handle_vblank(display, pipe); 978 979 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 980 flip_done_handler(display, pipe); 981 } 982 983 /* check event from PCH */ 984 if (!HAS_PCH_NOP(display) && (de_iir & DE_PCH_EVENT_IVB)) { 985 u32 pch_iir = intel_de_read(display, SDEIIR); 986 987 cpt_irq_handler(display, pch_iir); 988 989 /* clear PCH hotplug event before clear CPU irq */ 990 intel_de_write(display, SDEIIR, pch_iir); 991 } 992 } 993 994 void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier) 995 { 996 /* disable master interrupt before clearing iir */ 997 *de_ier = intel_de_read_fw(display, DEIER); 998 intel_de_write_fw(display, DEIER, *de_ier & ~DE_MASTER_IRQ_CONTROL); 999 1000 /* 1001 * Disable south interrupts. 
We'll only write to SDEIIR once, so further 1002 * interrupts will be stored on its back queue, and then we'll be able 1003 * to process them after we restore SDEIER (as soon as we restore it, 1004 * we'll get an interrupt if SDEIIR still has something to process due 1005 * to its back queue). 1006 */ 1007 if (!HAS_PCH_NOP(display)) { 1008 *sde_ier = intel_de_read_fw(display, SDEIER); 1009 intel_de_write_fw(display, SDEIER, 0); 1010 } else { 1011 *sde_ier = 0; 1012 } 1013 } 1014 1015 void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier) 1016 { 1017 intel_de_write_fw(display, DEIER, de_ier); 1018 1019 if (sde_ier) 1020 intel_de_write_fw(display, SDEIER, sde_ier); 1021 } 1022 1023 bool ilk_display_irq_handler(struct intel_display *display) 1024 { 1025 u32 de_iir; 1026 bool handled = false; 1027 1028 de_iir = intel_de_read_fw(display, DEIIR); 1029 if (de_iir) { 1030 intel_de_write_fw(display, DEIIR, de_iir); 1031 if (DISPLAY_VER(display) >= 7) 1032 _ivb_display_irq_handler(display, de_iir); 1033 else 1034 _ilk_display_irq_handler(display, de_iir); 1035 handled = true; 1036 } 1037 1038 return handled; 1039 } 1040 1041 static u32 gen8_de_port_aux_mask(struct intel_display *display) 1042 { 1043 u32 mask; 1044 1045 if (DISPLAY_VER(display) >= 20) 1046 return 0; 1047 else if (DISPLAY_VER(display) >= 14) 1048 return TGL_DE_PORT_AUX_DDIA | 1049 TGL_DE_PORT_AUX_DDIB; 1050 else if (DISPLAY_VER(display) >= 13) 1051 return TGL_DE_PORT_AUX_DDIA | 1052 TGL_DE_PORT_AUX_DDIB | 1053 TGL_DE_PORT_AUX_DDIC | 1054 XELPD_DE_PORT_AUX_DDID | 1055 XELPD_DE_PORT_AUX_DDIE | 1056 TGL_DE_PORT_AUX_USBC1 | 1057 TGL_DE_PORT_AUX_USBC2 | 1058 TGL_DE_PORT_AUX_USBC3 | 1059 TGL_DE_PORT_AUX_USBC4; 1060 else if (DISPLAY_VER(display) >= 12) 1061 return TGL_DE_PORT_AUX_DDIA | 1062 TGL_DE_PORT_AUX_DDIB | 1063 TGL_DE_PORT_AUX_DDIC | 1064 TGL_DE_PORT_AUX_USBC1 | 1065 TGL_DE_PORT_AUX_USBC2 | 1066 TGL_DE_PORT_AUX_USBC3 | 1067 TGL_DE_PORT_AUX_USBC4 | 1068 TGL_DE_PORT_AUX_USBC5 
| TGL_DE_PORT_AUX_USBC6;

	/* Display version < 12: accumulate the AUX channel bits incrementally. */
	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(display) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(display) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}

/*
 * Mask of the fault status bits in GEN8_DE_PIPE_IIR that are valid
 * for the running display version.
 */
static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 20)
		return MTL_PLANE_ATS_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 14)
		return MTL_PIPEDMC_ATS_FAULT |
			MTL_PLANE_ATS_FAULT |
			GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 12)
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 11)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else
		return GEN8_PIPE_CURSOR_FAULT |
			GEN8_PIPE_SPRITE_FAULT |
			GEN8_PIPE_PRIMARY_FAULT;
}

/* Report a plane ATS fault; the event is always considered handled. */
static bool handle_plane_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PLANE ATS fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

/* Report a PIPEDMC ATS fault; the event is always considered handled. */
static bool handle_pipedmc_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PIPEDMC ATS fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

/* Report a PIPEDMC fault; the event is always considered handled. */
static bool handle_pipedmc_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PIPEDMC fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

/* Fault bit -> handler tables, one per platform family; {} terminated. */
static const struct pipe_fault_handler mtl_pipe_fault_handlers[] = {
	{ .fault = MTL_PLANE_ATS_FAULT, .handle = handle_plane_ats_fault, },
	{ .fault = MTL_PIPEDMC_ATS_FAULT, .handle = handle_pipedmc_ats_fault, },
	{ .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler tgl_pipe_fault_handlers[] = {
	{ .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler icl_pipe_fault_handlers[] = {
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler skl_pipe_fault_handlers[] = {
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler bdw_pipe_fault_handlers[] = {
	{ .fault = GEN8_PIPE_SPRITE_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GEN8_PIPE_PRIMARY_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GEN8_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

/* Select the fault handler table matching the display version. */
static const struct pipe_fault_handler *
gen8_pipe_fault_handlers(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 14)
		return mtl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 12)
		return tgl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 11)
		return icl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 9)
		return skl_pipe_fault_handlers;
	else
		return bdw_pipe_fault_handlers;
}

/* Wake up anyone waiting on a Punit PM demand response. */
static void intel_pmdemand_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->pmdemand.waitqueue);
}

/*
 * Handle the DE MISC interrupt sources (DBuf overlap, PM demand response,
 * register-access timeout, GSE/opregion, PSR). Complains if no known
 * source was found in @iir.
 */
static void
gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
	bool found = false;

	if (HAS_DBUF_OVERLAP_DETECTION(display)) {
		if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) {
			drm_warn(display->drm, "DBuf overlap detected\n");
			found = true;
		}
	}

	if (DISPLAY_VER(display) >= 14) {
		if (iir & (XELPDP_PMDEMAND_RSP |
			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
				drm_dbg(display->drm,
					"Error waiting for Punit PM Demand Response\n");

			intel_pmdemand_irq_handler(display);
			found = true;
		}

		if (iir & XELPDP_RM_TIMEOUT) {
			u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
			drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
			found = true;
		}
	} else if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(display);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(display->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			if (DISPLAY_VER(display) >= 12)
				iir_reg = TRANS_PSR_IIR(display,
							intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			/* NOTE(review): rmw with no bits changed appears to read+ack the IIR - confirm intel_de_rmw() semantics */
			psr_iir = intel_de_rmw(display, iir_reg, 0, 0);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior GEN12 only have one EDP PSR */
			if (DISPLAY_VER(display) < 12)
				break;
		}
	}

	if (!found)
		drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}

/*
 * Handle a DSI TE (tearing effect) trigger: figure out which transcoder
 * fired, verify it is in command mode, and forward a vblank event for
 * the pipe feeding it.
 */
static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val;

	/*
	 * In case of dual link, TE comes from DSI_1;
	 * this is to check if dual link is enabled
	 */
	val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * if dual link is enabled, then read DSI_0
	 * transcoder registers
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
		PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ?
TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(display->drm, "DSI trancoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(display->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(display, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}

/* Flip-done bit for plane 1 / the primary plane in GEN8_DE_PIPE_IIR. */
static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

/* Read and ack the SDE (PCH) IIR and, on MTL+, the PICA IIR as well. */
static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(display, SDEIIR);
	if (!*pch_iir)
		return;

	/*
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		drm_WARN_ON(display->drm, INTEL_PCH_TYPE(display) < PCH_MTL);

		pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
		intel_de_write(display, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(display, SDEIIR, *pch_iir);

	if (pica_ier)
		intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}

/*
 * Top-level gen8+ display engine interrupt dispatch: acks and forwards
 * MISC, HPD, PORT, per-pipe and PCH interrupts per @master_ctl.
 */
void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_de_read(display, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_de_write(display, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(display, iir);
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_de_read(display, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_de_write(display, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(display, iir);
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_de_read(display, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_de_write(display, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(display)) {
				intel_dp_aux_irq_handler(display);
				found = true;
			}

			if (display->platform.geminilake || display->platform.broxton) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(display, hotplug_trigger);
					found = true;
				}
			} else if (display->platform.broadwell) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(display, hotplug_trigger);
					found = true;
				}
			}

			if ((display->platform.geminilake || display->platform.broxton) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(display);
				found = true;
			}

			if (DISPLAY_VER(display) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(display, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err_ratelimited(display->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE PIPE %c)!\n",
					    pipe_name(pipe));
			continue;
		}

		/* Ack first, then dispatch the individual events. */
		intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(display, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(display))
			flip_done_handler(display, pipe);

		if (HAS_DSB(display)) {
			if (iir & GEN12_DSB_INT(INTEL_DSB_0))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);

			if (iir & GEN12_DSB_INT(INTEL_DSB_1))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);

			if (iir & GEN12_DSB_INT(INTEL_DSB_2))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
		}

		if (HAS_PIPEDMC(display) && iir & GEN12_PIPEDMC_INTERRUPT)
			intel_pipedmc_irq_handler(display, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(display, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(display);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display,
						     gen8_pipe_fault_handlers(display),
						     pipe, fault_errors);
	}

	if (HAS_PCH_SPLIT(display) && !HAS_PCH_NOP(display) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		u32 pica_iir;

		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
		if (iir) {
			if (pica_iir)
				xelpdp_pica_irq_handler(display, pica_iir);

			if (INTEL_PCH_TYPE(display) >= PCH_ICP)
				icp_irq_handler(display, iir);
			else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
				spt_irq_handler(display, iir);
			else
				cpt_irq_handler(display, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(display->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}
}

/* Read and ack the GU MISC IIR if @master_ctl flags it; returns the IIR value. */
u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
{
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	intel_display_rpm_assert_block(display);

	iir = intel_de_read(display, GEN11_GU_MISC_IIR);
	if (likely(iir))
		intel_de_write(display, GEN11_GU_MISC_IIR, iir);

	intel_display_rpm_assert_unblock(display);

	return iir;
}

/* Handle a previously acked GU MISC interrupt (GSE/opregion only). */
void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(display);
}

/* gen11+ display interrupt entry point: disable, dispatch, re-enable. */
void gen11_display_irq_handler(struct intel_display *display)
{
	u32 disp_ctl;

	intel_display_rpm_assert_block(display);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
	gen8_de_irq_handler(display, disp_ctl);
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	intel_display_rpm_assert_unblock(display);
}

/* Refcounted enable side of the i915gm C-state workaround (see comment below). */
static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
{
	lockdep_assert_held(&display->drm->vblank_time_lock);

	/*
	 * Vblank/CRC interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank/CRC interrupts are actually enabled.
*/
	if (display->irq.vblank_enabled++ == 0)
		intel_de_write(display, SCPD0,
			       _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

/* Refcounted disable side of the i915gm C-state workaround. */
static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
{
	lockdep_assert_held(&display->drm->vblank_time_lock);

	if (--display->irq.vblank_enabled == 0)
		intel_de_write(display, SCPD0,
			       _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

/* Public entry point for the C-state workaround, takes the vblank time lock. */
void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
{
	spin_lock_irq(&display->drm->vblank_time_lock);

	if (enable)
		i915gm_irq_cstate_wa_enable(display);
	else
		i915gm_irq_cstate_wa_disable(display);

	spin_unlock_irq(&display->drm->vblank_time_lock);
}

/* drm_crtc_funcs::enable_vblank for i8xx: unmask the pipestat vblank bit. */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	return 0;
}

/* drm_crtc_funcs::disable_vblank for i8xx. */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);
}

/* i915gm variant: also arms the C-state wake workaround. */
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);

	i915gm_irq_cstate_wa_enable(display);

	return i8xx_enable_vblank(crtc);
}

/* i915gm variant: disables vblank, then drops the C-state workaround ref. */
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);

	i8xx_disable_vblank(crtc);

	i915gm_irq_cstate_wa_disable(display);
}

/* i965 uses the start-of-vblank pipestat bit instead. */
int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_enable_pipestat(display, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	return 0;
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_disable_pipestat(display, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);
}

/* ILK/IVB+: unmask the per-pipe vblank bit in DEIMR. */
int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(display) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&display->irq.lock, irqflags);
	ilk_enable_display_irq(display, bit);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(display))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(display) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&display->irq.lock, irqflags);
	ilk_disable_display_irq(display, bit);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);
}

/*
 * For DSI command-mode CRTCs, (un)mask the TE event instead of vblank.
 * Returns true if the CRTC uses TE and has been handled here.
 */
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct intel_display *display = to_intel_display(intel_crtc);
	enum port port;

	if (!(intel_crtc->mode_flags &
	      (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

	intel_de_rmw(display, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, enable ? 0 : DSI_TE_EVENT);

	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);

	return true;
}

/* Deferred work: notify PSR about vblank interrupt enable/disable changes. */
static void intel_display_vblank_notify_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), irq.vblank_notify_work);
	int vblank_enable_count = READ_ONCE(display->irq.vblank_enable_count);

	intel_psr_notify_vblank_enable_disable(display, vblank_enable_count);
}

/* BDW+: unmask the per-pipe vblank bit; DSI TE CRTCs are handled separately. */
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, true))
		return 0;

	if (crtc->vblank_psr_notify && display->irq.vblank_enable_count++ == 0)
		schedule_work(&display->irq.vblank_notify_work);

	spin_lock_irqsave(&display->irq.lock, irqflags);
	bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(display))
		drm_crtc_vblank_restore(&crtc->base);

	return 0;
}

void bdw_disable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, false))
		return;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	if (crtc->vblank_psr_notify && --display->irq.vblank_enable_count == 0)
		schedule_work(&display->irq.vblank_notify_work);
}

/* DPINVGTT status bits belonging to the given pipe (VLV/CHV page-table faults). */
static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return SPRITEB_INVALID_GTT_STATUS |
			SPRITEA_INVALID_GTT_STATUS |
			PLANEA_INVALID_GTT_STATUS |
			CURSORA_INVALID_GTT_STATUS;
	case PIPE_B:
		return SPRITED_INVALID_GTT_STATUS |
			SPRITEC_INVALID_GTT_STATUS |
			PLANEB_INVALID_GTT_STATUS |
			CURSORB_INVALID_GTT_STATUS;
	case PIPE_C:
		return SPRITEF_INVALID_GTT_STATUS |
			SPRITEE_INVALID_GTT_STATUS |
			PLANEC_INVALID_GTT_STATUS |
			CURSORC_INVALID_GTT_STATUS;
	default:
		return 0;
	}
}

/* VLV/CHV invalid-GTT fault bit -> plane handler table; {} terminated. */
static const struct pipe_fault_handler vlv_pipe_fault_handlers[] = {
	{ .fault = SPRITEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = SPRITED_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault =
SPRITEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = SPRITEF_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEE_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

/*
 * Ack DPINVGTT page-table faults: report the currently-enabled status
 * bits via @dpinvgtt and permanently disable any that fired.
 */
static void vlv_page_table_error_irq_ack(struct intel_display *display, u32 *dpinvgtt)
{
	u32 status, enable, tmp;

	tmp = intel_de_read(display, DPINVGTT);

	/* high 16 bits: enables, low 16 bits: status */
	enable = tmp >> 16;
	status = tmp & 0xffff;

	/*
	 * Despite what the docs claim, the status bits seem to get
	 * stuck permanently (similar the old PGTBL_ER register), so
	 * we have to disable and ignore them once set. They do get
	 * reset if the display power well goes down, so no need to
	 * track the enable mask explicitly.
	 */
	*dpinvgtt = status & enable;
	enable &= ~status;

	/* customary ack+disable then re-enable to guarantee an edge */
	intel_de_write(display, DPINVGTT, status);
	intel_de_write(display, DPINVGTT, enable << 16);
}

/* Dispatch previously-acked DPINVGTT faults to the per-pipe fault handlers. */
static void vlv_page_table_error_irq_handler(struct intel_display *display, u32 dpinvgtt)
{
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		fault_errors = dpinvgtt & vlv_dpinvgtt_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, vlv_pipe_fault_handlers,
						     pipe, fault_errors);
	}
}

/* Read and ack VLV_EIR; page-table faults are acked via DPINVGTT too. */
void vlv_display_error_irq_ack(struct intel_display *display,
			       u32 *eir, u32 *dpinvgtt)
{
	u32 emr;

	*eir = intel_de_read(display, VLV_EIR);

	if (*eir & VLV_ERROR_PAGE_TABLE)
		vlv_page_table_error_irq_ack(display, dpinvgtt);

	intel_de_write(display, VLV_EIR, *eir);

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits.
	 */
	emr = intel_de_read(display, VLV_EMR);
	intel_de_write(display, VLV_EMR, 0xffffffff);
	intel_de_write(display, VLV_EMR, emr);
}

/* Handle a previously-acked VLV master error interrupt. */
void vlv_display_error_irq_handler(struct intel_display *display,
				   u32 eir, u32 dpinvgtt)
{
	drm_dbg(display->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir & VLV_ERROR_PAGE_TABLE)
		vlv_page_table_error_irq_handler(display, dpinvgtt);
}

/* Reset all VLV/CHV display interrupt state; caller holds irq.lock. */
static void _vlv_display_irq_reset(struct intel_display *display)
{
	if (display->platform.cherryview)
		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	error_reset(display, VLV_ERROR_REGS);

	i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
	intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);

	i9xx_pipestat_irq_reset(display);

	irq_reset(display, VLV_IRQ_REGS);
	/* all sources masked until the next postinstall */
	display->irq.vlv_imr_mask = ~0u;
}

void vlv_display_irq_reset(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	if (display->irq.vlv_display_irqs_enabled)
		_vlv_display_irq_reset(display);
	spin_unlock_irq(&display->irq.lock);
}

/* Reset hotplug and pipestat interrupt state on pre-gen5 platforms. */
void i9xx_display_irq_reset(struct intel_display *display)
{
	if (HAS_HOTPLUG(display)) {
		i915_hotplug_interrupt_update(display, 0xffffffff, 0);
		intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
	}

	i9xx_pipestat_irq_reset(display);
}

/* IER enable mask for the display bits on pre-gen5 platforms. */
u32 i9xx_display_irq_enable_mask(struct intel_display *display)
{
	u32 enable_mask;

	enable_mask = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;

	if (DISPLAY_VER(display) >= 3)
		enable_mask |= I915_ASLE_INTERRUPT;

	if (HAS_HOTPLUG(display))
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;

	return enable_mask;
}

/* Postinstall for i915-class hardware: CRC done + ASLE pipestats. */
void i915_display_irq_postinstall(struct intel_display *display)
{
	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&display->irq.lock);
	i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&display->irq.lock);

	i915_enable_asle_pipestat(display);
}

/* Postinstall for i965-class hardware: additionally enables GMBUS on pipe A. */
void i965_display_irq_postinstall(struct intel_display *display)
{
	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&display->irq.lock);
	i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&display->irq.lock);

	i915_enable_asle_pipestat(display);
}

/* Error sources we unmask in VLV_EMR. */
static u32 vlv_error_mask(void)
{
	/* TODO enable other errors too?
*/
	return VLV_ERROR_PAGE_TABLE;
}

/* Enable VLV/CHV display interrupts; caller holds irq.lock. */
static void _vlv_display_irq_postinstall(struct intel_display *display)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	if (display->platform.cherryview)
		intel_de_write(display, DPINVGTT,
			       DPINVGTT_STATUS_MASK_CHV |
			       DPINVGTT_EN_MASK_CHV);
	else
		intel_de_write(display, DPINVGTT,
			       DPINVGTT_STATUS_MASK_VLV |
			       DPINVGTT_EN_MASK_VLV);

	/* EMR masks everything except the bits from vlv_error_mask() */
	error_init(display, VLV_ERROR_REGS, ~vlv_error_mask());

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(display, pipe)
		i915_enable_pipestat(display, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT;

	if (display->platform.cherryview)
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* reset must have left everything masked */
	drm_WARN_ON(display->drm, display->irq.vlv_imr_mask != ~0u);

	display->irq.vlv_imr_mask = ~enable_mask;

	irq_init(display, VLV_IRQ_REGS, display->irq.vlv_imr_mask, enable_mask);
}

void vlv_display_irq_postinstall(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	if (display->irq.vlv_display_irqs_enabled)
		_vlv_display_irq_postinstall(display);
	spin_unlock_irq(&display->irq.lock);
}

/* Reset south display engine (PCH) interrupts. */
static void ibx_display_irq_reset(struct intel_display *display)
{
	if (HAS_PCH_NOP(display))
		return;

	irq_reset(display, SDE_IRQ_REGS);

	if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
		intel_de_write(display, SERR_INT, 0xffffffff);
}

/* Reset ILK-era display engine interrupts, including the PCH. */
void ilk_display_irq_reset(struct intel_display *display)
{
	irq_reset(display, DE_IRQ_REGS);
	display->irq.ilk_de_imr_mask = ~0u;

	if (DISPLAY_VER(display) == 7)
		intel_de_write(display, GEN7_ERR_INT, 0xffffffff);

	if (display->platform.haswell) {
		intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
		intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
	}

	ibx_display_irq_reset(display);
}

/* Reset gen8 display engine interrupts; skips pipes whose power well is off. */
void gen8_display_irq_reset(struct intel_display *display)
{
	enum pipe pipe;

	if (!HAS_DISPLAY(display))
		return;

	intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
	intel_de_write(display, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(display, pipe)
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			irq_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	irq_reset(display, GEN8_DE_PORT_IRQ_REGS);
	irq_reset(display, GEN8_DE_MISC_IRQ_REGS);

	if (HAS_PCH_SPLIT(display))
		ibx_display_irq_reset(display);
}

/* Reset gen11+ display engine interrupts (PSR per transcoder on gen12+). */
void gen11_display_irq_reset(struct intel_display *display)
{
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if (!HAS_DISPLAY(display))
		return;

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);

	if (DISPLAY_VER(display) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(display, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(display, domain))
				continue;

			intel_de_write(display,
				       TRANS_PSR_IMR(display, trans),
				       0xffffffff);
			intel_de_write(display,
				       TRANS_PSR_IIR(display, trans),
				       0xffffffff);
		}
	} else {
		intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
		intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(display, pipe)
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			irq_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	irq_reset(display, GEN8_DE_PORT_IRQ_REGS);
	irq_reset(display, GEN8_DE_MISC_IRQ_REGS);

	if (DISPLAY_VER(display) >= 14)
		irq_reset(display, PICAINTERRUPT_IRQ_REGS);
	else
		irq_reset(display, GEN11_DE_HPD_IRQ_REGS);

	if (INTEL_PCH_TYPE(display) >= PCH_ICP)
		irq_reset(display, SDE_IRQ_REGS);
}

/* Re-arm per-pipe interrupts after a pipe power well comes back up. */
void gen8_irq_power_well_post_enable(struct intel_display *display,
				     u8 pipe_mask)
{
	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(display);
	enum pipe pipe;

	spin_lock_irq(&display->irq.lock);

	if (!intel_parent_irq_enabled(display)) {
		spin_unlock_irq(&display->irq.lock);
		return;
	}

	for_each_pipe_masked(display, pipe, pipe_mask)
		irq_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
			 display->irq.de_pipe_imr_mask[pipe],
			 ~display->irq.de_pipe_imr_mask[pipe] | extra_ier);

	spin_unlock_irq(&display->irq.lock);
}

/* Quiesce per-pipe interrupts before a pipe power well goes down. */
void gen8_irq_power_well_pre_disable(struct intel_display *display,
				     u8 pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&display->irq.lock);

	if (!intel_parent_irq_enabled(display)) {
		spin_unlock_irq(&display->irq.lock);
		return;
	}

	for_each_pipe_masked(display, pipe, pipe_mask)
		irq_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	spin_unlock_irq(&display->irq.lock);

	/* make sure we're done processing display irqs */
	intel_parent_irq_synchronize(display);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
2215 * 2216 * Note that we currently do this after installing the interrupt handler, 2217 * but before we enable the master interrupt. That should be sufficient 2218 * to avoid races with the irq handler, assuming we have MSI. Shared legacy 2219 * interrupts could still race. 2220 */ 2221 static void ibx_irq_postinstall(struct intel_display *display) 2222 { 2223 u32 mask; 2224 2225 if (HAS_PCH_NOP(display)) 2226 return; 2227 2228 if (HAS_PCH_IBX(display)) 2229 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 2230 else if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display)) 2231 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 2232 else 2233 mask = SDE_GMBUS_CPT; 2234 2235 irq_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff); 2236 } 2237 2238 void valleyview_enable_display_irqs(struct intel_display *display) 2239 { 2240 spin_lock_irq(&display->irq.lock); 2241 2242 if (display->irq.vlv_display_irqs_enabled) 2243 goto out; 2244 2245 display->irq.vlv_display_irqs_enabled = true; 2246 2247 if (intel_parent_irq_enabled(display)) { 2248 _vlv_display_irq_reset(display); 2249 _vlv_display_irq_postinstall(display); 2250 } 2251 2252 out: 2253 spin_unlock_irq(&display->irq.lock); 2254 } 2255 2256 void valleyview_disable_display_irqs(struct intel_display *display) 2257 { 2258 spin_lock_irq(&display->irq.lock); 2259 2260 if (!display->irq.vlv_display_irqs_enabled) 2261 goto out; 2262 2263 display->irq.vlv_display_irqs_enabled = false; 2264 2265 if (intel_parent_irq_enabled(display)) 2266 _vlv_display_irq_reset(display); 2267 out: 2268 spin_unlock_irq(&display->irq.lock); 2269 } 2270 2271 void ilk_de_irq_postinstall(struct intel_display *display) 2272 { 2273 u32 display_mask, extra_mask; 2274 2275 if (DISPLAY_VER(display) >= 7) { 2276 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2277 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 2278 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2279 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 2280 DE_PLANE_FLIP_DONE_IVB(PLANE_C) | 2281 
DE_PLANE_FLIP_DONE_IVB(PLANE_B) | 2282 DE_PLANE_FLIP_DONE_IVB(PLANE_A) | 2283 DE_DP_A_HOTPLUG_IVB); 2284 } else { 2285 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | 2286 DE_PCH_EVENT | DE_GTT_FAULT | 2287 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 2288 DE_PIPEA_CRC_DONE | DE_POISON); 2289 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | 2290 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 2291 DE_PLANE_FLIP_DONE(PLANE_A) | 2292 DE_PLANE_FLIP_DONE(PLANE_B) | 2293 DE_DP_A_HOTPLUG); 2294 } 2295 2296 if (display->platform.haswell) { 2297 assert_iir_is_zero(display, EDP_PSR_IIR); 2298 display_mask |= DE_EDP_PSR_INT_HSW; 2299 } 2300 2301 if (display->platform.ironlake && display->platform.mobile) 2302 extra_mask |= DE_PCU_EVENT; 2303 2304 display->irq.ilk_de_imr_mask = ~display_mask; 2305 2306 ibx_irq_postinstall(display); 2307 2308 irq_init(display, DE_IRQ_REGS, display->irq.ilk_de_imr_mask, 2309 display_mask | extra_mask); 2310 } 2311 2312 static void mtp_irq_postinstall(struct intel_display *display); 2313 static void icp_irq_postinstall(struct intel_display *display); 2314 2315 void gen8_de_irq_postinstall(struct intel_display *display) 2316 { 2317 u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) | 2318 GEN8_PIPE_CDCLK_CRC_DONE; 2319 u32 de_pipe_enables; 2320 u32 de_port_masked = gen8_de_port_aux_mask(display); 2321 u32 de_port_enables; 2322 u32 de_misc_masked = GEN8_DE_EDP_PSR; 2323 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 2324 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 2325 enum pipe pipe; 2326 2327 if (!HAS_DISPLAY(display)) 2328 return; 2329 2330 if (DISPLAY_VER(display) >= 14) 2331 mtp_irq_postinstall(display); 2332 else if (INTEL_PCH_TYPE(display) >= PCH_ICP) 2333 icp_irq_postinstall(display); 2334 else if (HAS_PCH_SPLIT(display)) 2335 ibx_irq_postinstall(display); 2336 2337 if (DISPLAY_VER(display) < 11) 2338 de_misc_masked |= GEN8_DE_MISC_GSE; 2339 2340 if (display->platform.geminilake || display->platform.broxton) 2341 de_port_masked |= 
BXT_DE_PORT_GMBUS; 2342 2343 if (DISPLAY_VER(display) >= 14) { 2344 de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR | 2345 XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT; 2346 } else if (DISPLAY_VER(display) >= 11) { 2347 enum port port; 2348 2349 if (intel_bios_is_dsi_present(display, &port)) 2350 de_port_masked |= DSI0_TE | DSI1_TE; 2351 } 2352 2353 if (HAS_DBUF_OVERLAP_DETECTION(display)) 2354 de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED; 2355 2356 if (HAS_DSB(display)) 2357 de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) | 2358 GEN12_DSB_INT(INTEL_DSB_1) | 2359 GEN12_DSB_INT(INTEL_DSB_2); 2360 2361 /* TODO figure PIPEDMC interrupts for pre-LNL */ 2362 if (DISPLAY_VER(display) >= 20) 2363 de_pipe_masked |= GEN12_PIPEDMC_INTERRUPT; 2364 2365 de_pipe_enables = de_pipe_masked | 2366 GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | 2367 gen8_de_pipe_flip_done_mask(display); 2368 2369 de_port_enables = de_port_masked; 2370 if (display->platform.geminilake || display->platform.broxton) 2371 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 2372 else if (display->platform.broadwell) 2373 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK; 2374 2375 if (DISPLAY_VER(display) >= 12) { 2376 enum transcoder trans; 2377 2378 for_each_cpu_transcoder_masked(display, trans, trans_mask) { 2379 enum intel_display_power_domain domain; 2380 2381 domain = POWER_DOMAIN_TRANSCODER(trans); 2382 if (!intel_display_power_is_enabled(display, domain)) 2383 continue; 2384 2385 assert_iir_is_zero(display, TRANS_PSR_IIR(display, trans)); 2386 } 2387 } else { 2388 assert_iir_is_zero(display, EDP_PSR_IIR); 2389 } 2390 2391 for_each_pipe(display, pipe) { 2392 display->irq.de_pipe_imr_mask[pipe] = ~de_pipe_masked; 2393 2394 if (intel_display_power_is_enabled(display, 2395 POWER_DOMAIN_PIPE(pipe))) 2396 irq_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe), 2397 display->irq.de_pipe_imr_mask[pipe], 2398 de_pipe_enables); 2399 } 2400 2401 irq_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked, de_port_enables); 2402 
irq_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked); 2403 2404 if (IS_DISPLAY_VER(display, 11, 13)) { 2405 u32 de_hpd_masked = 0; 2406 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 2407 GEN11_DE_TBT_HOTPLUG_MASK; 2408 2409 irq_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked, de_hpd_enables); 2410 } 2411 } 2412 2413 u32 xelpdp_pica_aux_mask(struct intel_display *display) 2414 { 2415 u32 mask = XELPDP_AUX_TC_MASK; 2416 2417 if (DISPLAY_VER(display) >= 20) 2418 mask |= XE2LPD_AUX_DDI_MASK; 2419 2420 return mask; 2421 } 2422 2423 static void mtp_irq_postinstall(struct intel_display *display) 2424 { 2425 u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT; 2426 u32 de_hpd_mask = xelpdp_pica_aux_mask(display); 2427 u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK | 2428 XELPDP_TBT_HOTPLUG_MASK; 2429 2430 irq_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask, de_hpd_enables); 2431 2432 irq_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff); 2433 } 2434 2435 static void icp_irq_postinstall(struct intel_display *display) 2436 { 2437 u32 mask = SDE_GMBUS_ICP; 2438 2439 irq_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff); 2440 } 2441 2442 void gen11_de_irq_postinstall(struct intel_display *display) 2443 { 2444 if (!HAS_DISPLAY(display)) 2445 return; 2446 2447 gen8_de_irq_postinstall(display); 2448 2449 intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 2450 } 2451 2452 void dg1_de_irq_postinstall(struct intel_display *display) 2453 { 2454 if (!HAS_DISPLAY(display)) 2455 return; 2456 2457 gen8_de_irq_postinstall(display); 2458 intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 2459 } 2460 2461 void intel_display_irq_init(struct intel_display *display) 2462 { 2463 spin_lock_init(&display->irq.lock); 2464 2465 display->drm->vblank_disable_immediate = true; 2466 2467 intel_hotplug_irq_init(display); 2468 2469 INIT_WORK(&display->irq.vblank_notify_work, 2470 intel_display_vblank_notify_work); 2471 
} 2472 2473 struct intel_display_irq_snapshot { 2474 u32 derrmr; 2475 }; 2476 2477 struct intel_display_irq_snapshot * 2478 intel_display_irq_snapshot_capture(struct intel_display *display) 2479 { 2480 struct intel_display_irq_snapshot *snapshot; 2481 2482 snapshot = kzalloc_obj(*snapshot, GFP_ATOMIC); 2483 if (!snapshot) 2484 return NULL; 2485 2486 if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display)) 2487 snapshot->derrmr = intel_de_read(display, DERRMR); 2488 2489 return snapshot; 2490 } 2491 2492 void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot, 2493 struct drm_printer *p) 2494 { 2495 if (!snapshot) 2496 return; 2497 2498 drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr); 2499 } 2500