1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <linux/circ_buf.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /** 41 * DOC: interrupt handling 42 * 43 * These functions provide the basic support for enabling and disabling the 44 * interrupt handling support. There's a lot more functionality in i915_irq.c 45 * and related files, but that will be described in separate chapters. 
46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 /* IIR can theoretically queue up two events. Be paranoid. */ 119 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 121 POSTING_READ(GEN8_##type##_IMR(which)); \ 122 I915_WRITE(GEN8_##type##_IER(which), 0); \ 123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 124 POSTING_READ(GEN8_##type##_IIR(which)); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 } while (0) 128 129 #define GEN3_IRQ_RESET(type) do { \ 130 I915_WRITE(type##IMR, 0xffffffff); \ 131 POSTING_READ(type##IMR); \ 132 I915_WRITE(type##IER, 0); \ 133 I915_WRITE(type##IIR, 0xffffffff); \ 134 POSTING_READ(type##IIR); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 } while (0) 138 139 #define GEN2_IRQ_RESET(type) do { \ 140 I915_WRITE16(type##IMR, 0xffff); \ 141 POSTING_READ16(type##IMR); \ 142 I915_WRITE16(type##IER, 0); \ 143 I915_WRITE16(type##IIR, 0xffff); \ 144 POSTING_READ16(type##IIR); \ 145 I915_WRITE16(type##IIR, 0xffff); \ 146 POSTING_READ16(type##IIR); \ 147 } while (0) 148 149 /* 150 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
151 */ 152 static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv, 153 i915_reg_t reg) 154 { 155 u32 val = I915_READ(reg); 156 157 if (val == 0) 158 return; 159 160 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 161 i915_mmio_reg_offset(reg), val); 162 I915_WRITE(reg, 0xffffffff); 163 POSTING_READ(reg); 164 I915_WRITE(reg, 0xffffffff); 165 POSTING_READ(reg); 166 } 167 168 static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv, 169 i915_reg_t reg) 170 { 171 u16 val = I915_READ16(reg); 172 173 if (val == 0) 174 return; 175 176 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 177 i915_mmio_reg_offset(reg), val); 178 I915_WRITE16(reg, 0xffff); 179 POSTING_READ16(reg); 180 I915_WRITE16(reg, 0xffff); 181 POSTING_READ16(reg); 182 } 183 184 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 185 gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 186 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 187 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 188 POSTING_READ(GEN8_##type##_IMR(which)); \ 189 } while (0) 190 191 #define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \ 192 gen3_assert_iir_is_zero(dev_priv, type##IIR); \ 193 I915_WRITE(type##IER, (ier_val)); \ 194 I915_WRITE(type##IMR, (imr_val)); \ 195 POSTING_READ(type##IMR); \ 196 } while (0) 197 198 #define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \ 199 gen2_assert_iir_is_zero(dev_priv, type##IIR); \ 200 I915_WRITE16(type##IER, (ier_val)); \ 201 I915_WRITE16(type##IMR, (imr_val)); \ 202 POSTING_READ16(type##IMR); \ 203 } while (0) 204 205 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 206 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 207 208 /* For display hotplug interrupt */ 209 static inline void 210 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 211 uint32_t mask, 212 uint32_t bits) 213 { 214 uint32_t val; 215 216 lockdep_assert_held(&dev_priv->irq_lock); 217 WARN_ON(bits & ~mask); 218 219 val = I915_READ(PORT_HOTPLUG_EN); 220 val &= ~mask; 221 val |= bits; 222 I915_WRITE(PORT_HOTPLUG_EN, val); 223 } 224 225 /** 226 * i915_hotplug_interrupt_update - update hotplug interrupt enable 227 * @dev_priv: driver private 228 * @mask: bits to update 229 * @bits: bits to enable 230 * NOTE: the HPD enable bits are modified both inside and outside 231 * of an interrupt context. To avoid that read-modify-write cycles 232 * interfer, these bits are protected by a spinlock. Since this 233 * function is usually not called from a context where the lock is 234 * held already, this function acquires the lock itself. A non-locking 235 * version is also available. 
236 */ 237 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 238 uint32_t mask, 239 uint32_t bits) 240 { 241 spin_lock_irq(&dev_priv->irq_lock); 242 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 243 spin_unlock_irq(&dev_priv->irq_lock); 244 } 245 246 static u32 247 gen11_gt_engine_identity(struct drm_i915_private * const i915, 248 const unsigned int bank, const unsigned int bit); 249 250 bool gen11_reset_one_iir(struct drm_i915_private * const i915, 251 const unsigned int bank, 252 const unsigned int bit) 253 { 254 void __iomem * const regs = i915->regs; 255 u32 dw; 256 257 lockdep_assert_held(&i915->irq_lock); 258 259 dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 260 if (dw & BIT(bit)) { 261 /* 262 * According to the BSpec, DW_IIR bits cannot be cleared without 263 * first servicing the Selector & Shared IIR registers. 264 */ 265 gen11_gt_engine_identity(i915, bank, bit); 266 267 /* 268 * We locked GT INT DW by reading it. If we want to (try 269 * to) recover from this succesfully, we need to clear 270 * our bit, otherwise we are locking the register for 271 * everybody. 272 */ 273 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit)); 274 275 return true; 276 } 277 278 return false; 279 } 280 281 /** 282 * ilk_update_display_irq - update DEIMR 283 * @dev_priv: driver private 284 * @interrupt_mask: mask of interrupt bits to update 285 * @enabled_irq_mask: mask of interrupt bits to enable 286 */ 287 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 288 uint32_t interrupt_mask, 289 uint32_t enabled_irq_mask) 290 { 291 uint32_t new_val; 292 293 lockdep_assert_held(&dev_priv->irq_lock); 294 295 WARN_ON(enabled_irq_mask & ~interrupt_mask); 296 297 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 298 return; 299 300 new_val = dev_priv->irq_mask; 301 new_val &= ~interrupt_mask; 302 new_val |= (~enabled_irq_mask & interrupt_mask); 303 304 if (new_val != dev_priv->irq_mask) { 305 dev_priv->irq_mask = new_val; 306 I915_WRITE(DEIMR, dev_priv->irq_mask); 307 POSTING_READ(DEIMR); 308 } 309 } 310 311 /** 312 * ilk_update_gt_irq - update GTIMR 313 * @dev_priv: driver private 314 * @interrupt_mask: mask of interrupt bits to update 315 * @enabled_irq_mask: mask of interrupt bits to enable 316 */ 317 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 318 uint32_t interrupt_mask, 319 uint32_t enabled_irq_mask) 320 { 321 lockdep_assert_held(&dev_priv->irq_lock); 322 323 WARN_ON(enabled_irq_mask & ~interrupt_mask); 324 325 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 326 return; 327 328 dev_priv->gt_irq_mask &= ~interrupt_mask; 329 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 330 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 331 } 332 333 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 334 { 335 ilk_update_gt_irq(dev_priv, mask, mask); 336 POSTING_READ_FW(GTIMR); 337 } 338 339 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 340 { 341 ilk_update_gt_irq(dev_priv, mask, 0); 342 } 343 344 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 345 { 346 WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11); 347 348 return INTEL_GEN(dev_priv) >= 8 ? 
GEN8_GT_IIR(2) : GEN6_PMIIR; 349 } 350 351 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 352 { 353 if (INTEL_GEN(dev_priv) >= 11) 354 return GEN11_GPM_WGBOXPERF_INTR_MASK; 355 else if (INTEL_GEN(dev_priv) >= 8) 356 return GEN8_GT_IMR(2); 357 else 358 return GEN6_PMIMR; 359 } 360 361 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 362 { 363 if (INTEL_GEN(dev_priv) >= 11) 364 return GEN11_GPM_WGBOXPERF_INTR_ENABLE; 365 else if (INTEL_GEN(dev_priv) >= 8) 366 return GEN8_GT_IER(2); 367 else 368 return GEN6_PMIER; 369 } 370 371 /** 372 * snb_update_pm_irq - update GEN6_PMIMR 373 * @dev_priv: driver private 374 * @interrupt_mask: mask of interrupt bits to update 375 * @enabled_irq_mask: mask of interrupt bits to enable 376 */ 377 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 378 uint32_t interrupt_mask, 379 uint32_t enabled_irq_mask) 380 { 381 uint32_t new_val; 382 383 WARN_ON(enabled_irq_mask & ~interrupt_mask); 384 385 lockdep_assert_held(&dev_priv->irq_lock); 386 387 new_val = dev_priv->pm_imr; 388 new_val &= ~interrupt_mask; 389 new_val |= (~enabled_irq_mask & interrupt_mask); 390 391 if (new_val != dev_priv->pm_imr) { 392 dev_priv->pm_imr = new_val; 393 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr); 394 POSTING_READ(gen6_pm_imr(dev_priv)); 395 } 396 } 397 398 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 399 { 400 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 401 return; 402 403 snb_update_pm_irq(dev_priv, mask, mask); 404 } 405 406 static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 407 { 408 snb_update_pm_irq(dev_priv, mask, 0); 409 } 410 411 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 412 { 413 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 414 return; 415 416 __gen6_mask_pm_irq(dev_priv, mask); 417 } 418 419 static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) 420 { 421 i915_reg_t reg = gen6_pm_iir(dev_priv); 422 423 lockdep_assert_held(&dev_priv->irq_lock); 424 425 I915_WRITE(reg, reset_mask); 426 I915_WRITE(reg, reset_mask); 427 POSTING_READ(reg); 428 } 429 430 static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) 431 { 432 lockdep_assert_held(&dev_priv->irq_lock); 433 434 dev_priv->pm_ier |= enable_mask; 435 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 436 gen6_unmask_pm_irq(dev_priv, enable_mask); 437 /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */ 438 } 439 440 static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) 441 { 442 lockdep_assert_held(&dev_priv->irq_lock); 443 444 dev_priv->pm_ier &= ~disable_mask; 445 __gen6_mask_pm_irq(dev_priv, disable_mask); 446 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 447 /* though a barrier is missing here, but don't really need a one */ 448 } 449 450 void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv) 451 { 452 spin_lock_irq(&dev_priv->irq_lock); 453 454 while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)) 455 ; 456 457 dev_priv->gt_pm.rps.pm_iir = 0; 458 459 spin_unlock_irq(&dev_priv->irq_lock); 460 } 461 462 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) 463 { 464 spin_lock_irq(&dev_priv->irq_lock); 465 gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events); 466 dev_priv->gt_pm.rps.pm_iir = 0; 467 spin_unlock_irq(&dev_priv->irq_lock); 468 } 469 470 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 471 { 472 struct intel_rps *rps = &dev_priv->gt_pm.rps; 
473 474 if (READ_ONCE(rps->interrupts_enabled)) 475 return; 476 477 spin_lock_irq(&dev_priv->irq_lock); 478 WARN_ON_ONCE(rps->pm_iir); 479 480 if (INTEL_GEN(dev_priv) >= 11) 481 WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)); 482 else 483 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 484 485 rps->interrupts_enabled = true; 486 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 487 488 spin_unlock_irq(&dev_priv->irq_lock); 489 } 490 491 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 492 { 493 struct intel_rps *rps = &dev_priv->gt_pm.rps; 494 495 if (!READ_ONCE(rps->interrupts_enabled)) 496 return; 497 498 spin_lock_irq(&dev_priv->irq_lock); 499 rps->interrupts_enabled = false; 500 501 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 502 503 gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 504 505 spin_unlock_irq(&dev_priv->irq_lock); 506 synchronize_irq(dev_priv->drm.irq); 507 508 /* Now that we will not be generating any more work, flush any 509 * outstanding tasks. As we are called on the RPS idle path, 510 * we will reset the GPU to minimum frequencies, so the current 511 * state of the worker can be discarded. 512 */ 513 cancel_work_sync(&rps->work); 514 if (INTEL_GEN(dev_priv) >= 11) 515 gen11_reset_rps_interrupts(dev_priv); 516 else 517 gen6_reset_rps_interrupts(dev_priv); 518 } 519 520 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) 521 { 522 assert_rpm_wakelock_held(dev_priv); 523 524 spin_lock_irq(&dev_priv->irq_lock); 525 gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); 526 spin_unlock_irq(&dev_priv->irq_lock); 527 } 528 529 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) 530 { 531 assert_rpm_wakelock_held(dev_priv); 532 533 spin_lock_irq(&dev_priv->irq_lock); 534 if (!dev_priv->guc.interrupts_enabled) { 535 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & 536 dev_priv->pm_guc_events); 537 dev_priv->guc.interrupts_enabled = true; 538 gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); 539 } 540 spin_unlock_irq(&dev_priv->irq_lock); 541 } 542 543 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) 544 { 545 assert_rpm_wakelock_held(dev_priv); 546 547 spin_lock_irq(&dev_priv->irq_lock); 548 dev_priv->guc.interrupts_enabled = false; 549 550 gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); 551 552 spin_unlock_irq(&dev_priv->irq_lock); 553 synchronize_irq(dev_priv->drm.irq); 554 555 gen9_reset_guc_interrupts(dev_priv); 556 } 557 558 /** 559 * bdw_update_port_irq - update DE port interrupt 560 * @dev_priv: driver private 561 * @interrupt_mask: mask of interrupt bits to update 562 * @enabled_irq_mask: mask of interrupt bits to enable 563 */ 564 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 565 uint32_t interrupt_mask, 566 uint32_t enabled_irq_mask) 567 { 568 uint32_t new_val; 569 uint32_t old_val; 570 571 lockdep_assert_held(&dev_priv->irq_lock); 572 573 WARN_ON(enabled_irq_mask & ~interrupt_mask); 574 575 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 576 return; 577 578 old_val = I915_READ(GEN8_DE_PORT_IMR); 579 580 new_val = old_val; 581 new_val &= ~interrupt_mask; 582 new_val |= (~enabled_irq_mask & interrupt_mask); 583 584 if (new_val != old_val) { 585 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 586 POSTING_READ(GEN8_DE_PORT_IMR); 587 } 588 } 589 590 /** 591 * bdw_update_pipe_irq - update DE pipe interrupt 592 * @dev_priv: driver private 593 * @pipe: pipe whose interrupt to update 594 * @interrupt_mask: mask of 
interrupt bits to update 595 * @enabled_irq_mask: mask of interrupt bits to enable 596 */ 597 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 598 enum pipe pipe, 599 uint32_t interrupt_mask, 600 uint32_t enabled_irq_mask) 601 { 602 uint32_t new_val; 603 604 lockdep_assert_held(&dev_priv->irq_lock); 605 606 WARN_ON(enabled_irq_mask & ~interrupt_mask); 607 608 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 609 return; 610 611 new_val = dev_priv->de_irq_mask[pipe]; 612 new_val &= ~interrupt_mask; 613 new_val |= (~enabled_irq_mask & interrupt_mask); 614 615 if (new_val != dev_priv->de_irq_mask[pipe]) { 616 dev_priv->de_irq_mask[pipe] = new_val; 617 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 618 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 619 } 620 } 621 622 /** 623 * ibx_display_interrupt_update - update SDEIMR 624 * @dev_priv: driver private 625 * @interrupt_mask: mask of interrupt bits to update 626 * @enabled_irq_mask: mask of interrupt bits to enable 627 */ 628 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 629 uint32_t interrupt_mask, 630 uint32_t enabled_irq_mask) 631 { 632 uint32_t sdeimr = I915_READ(SDEIMR); 633 sdeimr &= ~interrupt_mask; 634 sdeimr |= (~enabled_irq_mask & interrupt_mask); 635 636 WARN_ON(enabled_irq_mask & ~interrupt_mask); 637 638 lockdep_assert_held(&dev_priv->irq_lock); 639 640 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 641 return; 642 643 I915_WRITE(SDEIMR, sdeimr); 644 POSTING_READ(SDEIMR); 645 } 646 647 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 648 enum pipe pipe) 649 { 650 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 651 u32 enable_mask = status_mask << 16; 652 653 lockdep_assert_held(&dev_priv->irq_lock); 654 655 if (INTEL_GEN(dev_priv) < 5) 656 goto out; 657 658 /* 659 * On pipe A we don't support the PSR interrupt yet, 660 * on pipe B and C the same bit MBZ. 661 */ 662 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 663 return 0; 664 /* 665 * On pipe B and C we don't support the PSR interrupt yet, on pipe 666 * A the same bit is for perf counters which we don't use either. 
667 */ 668 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 669 return 0; 670 671 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 672 SPRITE0_FLIP_DONE_INT_EN_VLV | 673 SPRITE1_FLIP_DONE_INT_EN_VLV); 674 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 675 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 676 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 677 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 678 679 out: 680 WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 681 status_mask & ~PIPESTAT_INT_STATUS_MASK, 682 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 683 pipe_name(pipe), enable_mask, status_mask); 684 685 return enable_mask; 686 } 687 688 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 689 enum pipe pipe, u32 status_mask) 690 { 691 i915_reg_t reg = PIPESTAT(pipe); 692 u32 enable_mask; 693 694 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 695 "pipe %c: status_mask=0x%x\n", 696 pipe_name(pipe), status_mask); 697 698 lockdep_assert_held(&dev_priv->irq_lock); 699 WARN_ON(!intel_irqs_enabled(dev_priv)); 700 701 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 702 return; 703 704 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 705 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 706 707 I915_WRITE(reg, enable_mask | status_mask); 708 POSTING_READ(reg); 709 } 710 711 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 712 enum pipe pipe, u32 status_mask) 713 { 714 i915_reg_t reg = PIPESTAT(pipe); 715 u32 enable_mask; 716 717 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 718 "pipe %c: status_mask=0x%x\n", 719 pipe_name(pipe), status_mask); 720 721 lockdep_assert_held(&dev_priv->irq_lock); 722 WARN_ON(!intel_irqs_enabled(dev_priv)); 723 724 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 725 return; 726 727 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 728 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 729 730 I915_WRITE(reg, enable_mask | status_mask); 731 POSTING_READ(reg); 732 } 733 734 /** 735 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 736 * @dev_priv: i915 device private 737 */ 738 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 739 { 740 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 741 return; 742 743 spin_lock_irq(&dev_priv->irq_lock); 744 745 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 746 if (INTEL_GEN(dev_priv) >= 4) 747 i915_enable_pipestat(dev_priv, PIPE_A, 748 PIPE_LEGACY_BLC_EVENT_STATUS); 749 750 spin_unlock_irq(&dev_priv->irq_lock); 751 } 752 753 /* 754 * This timing diagram depicts the video signal in and 755 * around the vertical blanking period. 756 * 757 * Assumptions about the fictitious mode used in this example: 758 * vblank_start >= 3 759 * vsync_start = vblank_start + 1 760 * vsync_end = vblank_start + 2 761 * vtotal = vblank_start + 3 762 * 763 * start of vblank: 764 * latch double buffered registers 765 * increment frame counter (ctg+) 766 * generate start of vblank interrupt (gen4+) 767 * | 768 * | frame start: 769 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 770 * | may be shifted forward 1-3 extra lines via PIPECONF 771 * | | 772 * | | start of vsync: 773 * | | generate vsync interrupt 774 * | | | 775 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 776 * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/ 777 * ----va---> <-----------------vb--------------------> <--------va------------- 778 * | | <----vs-----> | 779 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 780 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 781 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 782 * | | | 783 * last visible pixel first visible pixel 784 * | increment frame counter (gen3/4) 785 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 786 * 787 * x = horizontal active 788 * _ = horizontal blanking 789 * hs = horizontal sync 790 * va = vertical active 791 * vb = vertical blanking 792 * vs = vertical sync 793 * vbs = vblank_start (number) 794 * 795 * Summary: 796 * - most events happen at the start of horizontal sync 797 * - frame start happens at the start of horizontal blank, 1-4 lines 798 * (depending on PIPECONF settings) after the start of vblank 799 * - gen3/4 pixel and frame counter are synchronized with the start 800 * of horizontal active on the first line of vertical active 801 */ 802 803 /* Called from drm generic code, passed a 'crtc', which 804 * we use as a pipe index 805 */ 806 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 807 { 808 struct drm_i915_private *dev_priv = to_i915(dev); 809 i915_reg_t high_frame, low_frame; 810 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 811 const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode; 812 unsigned long irqflags; 813 814 htotal = mode->crtc_htotal; 815 hsync_start = mode->crtc_hsync_start; 816 vbl_start = mode->crtc_vblank_start; 817 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 818 vbl_start = DIV_ROUND_UP(vbl_start, 2); 819 820 /* Convert to pixel count */ 821 vbl_start *= htotal; 822 823 /* Start of vblank event occurs at start of hsync */ 824 vbl_start -= htotal - hsync_start; 825 826 high_frame = PIPEFRAME(pipe); 827 low_frame = PIPEFRAMEPIXEL(pipe); 828 829 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 830 831 /* 832 * High & low register fields aren't synchronized, so make sure 833 * we get a low value that's stable across two reads of the high 834 * register. 835 */ 836 do { 837 high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 838 low = I915_READ_FW(low_frame); 839 high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 840 } while (high1 != high2); 841 842 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 843 844 high1 >>= PIPE_FRAME_HIGH_SHIFT; 845 pixel = low & PIPE_PIXEL_MASK; 846 low >>= PIPE_FRAME_LOW_SHIFT; 847 848 /* 849 * The frame counter increments at beginning of active. 850 * Cook up a vblank counter by also checking the pixel 851 * counter against vblank start. 852 */ 853 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 854 } 855 856 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 857 { 858 struct drm_i915_private *dev_priv = to_i915(dev); 859 860 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 861 } 862 863 /* 864 * On certain encoders on certain platforms, pipe 865 * scanline register will not work to get the scanline, 866 * since the timings are driven from the PORT or issues 867 * with scanline register updates. 868 * This function will use Framestamp and current 869 * timestamp registers to calculate the scanline. 
870 */ 871 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) 872 { 873 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 874 struct drm_vblank_crtc *vblank = 875 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 876 const struct drm_display_mode *mode = &vblank->hwmode; 877 u32 vblank_start = mode->crtc_vblank_start; 878 u32 vtotal = mode->crtc_vtotal; 879 u32 htotal = mode->crtc_htotal; 880 u32 clock = mode->crtc_clock; 881 u32 scanline, scan_prev_time, scan_curr_time, scan_post_time; 882 883 /* 884 * To avoid the race condition where we might cross into the 885 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR 886 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR 887 * during the same frame. 888 */ 889 do { 890 /* 891 * This field provides read back of the display 892 * pipe frame time stamp. The time stamp value 893 * is sampled at every start of vertical blank. 894 */ 895 scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); 896 897 /* 898 * The TIMESTAMP_CTR register has the current 899 * time stamp value. 900 */ 901 scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR); 902 903 scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); 904 } while (scan_post_time != scan_prev_time); 905 906 scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, 907 clock), 1000 * htotal); 908 scanline = min(scanline, vtotal - 1); 909 scanline = (scanline + vblank_start) % vtotal; 910 911 return scanline; 912 } 913 914 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ 915 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 916 { 917 struct drm_device *dev = crtc->base.dev; 918 struct drm_i915_private *dev_priv = to_i915(dev); 919 const struct drm_display_mode *mode; 920 struct drm_vblank_crtc *vblank; 921 enum pipe pipe = crtc->pipe; 922 int position, vtotal; 923 924 if (!crtc->active) 925 return -1; 926 927 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 928 mode = &vblank->hwmode; 929 930 if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP) 931 return __intel_get_crtc_scanline_from_timestamp(crtc); 932 933 vtotal = mode->crtc_vtotal; 934 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 935 vtotal /= 2; 936 937 if (IS_GEN2(dev_priv)) 938 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 939 else 940 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 941 942 /* 943 * On HSW, the DSL reg (0x70000) appears to return 0 if we 944 * read it just before the start of vblank. So try it again 945 * so we don't accidentally end up spanning a vblank frame 946 * increment, causing the pipe_update_end() code to squak at us. 947 * 948 * The nature of this problem means we can't simply check the ISR 949 * bit and return the vblank start value; nor can we use the scanline 950 * debug register in the transcoder as it appears to have the same 951 * problem. We may need to extend this to include other platforms, 952 * but so far testing only shows the problem on HSW. 953 */ 954 if (HAS_DDI(dev_priv) && !position) { 955 int i, temp; 956 957 for (i = 0; i < 100; i++) { 958 udelay(1); 959 temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 960 if (temp != position) { 961 position = temp; 962 break; 963 } 964 } 965 } 966 967 /* 968 * See update_scanline_offset() for the details on the 969 * scanline_offset adjustment. 
970 */ 971 return (position + crtc->scanline_offset) % vtotal; 972 } 973 974 static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 975 bool in_vblank_irq, int *vpos, int *hpos, 976 ktime_t *stime, ktime_t *etime, 977 const struct drm_display_mode *mode) 978 { 979 struct drm_i915_private *dev_priv = to_i915(dev); 980 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 981 pipe); 982 int position; 983 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 984 unsigned long irqflags; 985 986 if (WARN_ON(!mode->crtc_clock)) { 987 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 988 "pipe %c\n", pipe_name(pipe)); 989 return false; 990 } 991 992 htotal = mode->crtc_htotal; 993 hsync_start = mode->crtc_hsync_start; 994 vtotal = mode->crtc_vtotal; 995 vbl_start = mode->crtc_vblank_start; 996 vbl_end = mode->crtc_vblank_end; 997 998 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 999 vbl_start = DIV_ROUND_UP(vbl_start, 2); 1000 vbl_end /= 2; 1001 vtotal /= 2; 1002 } 1003 1004 /* 1005 * Lock uncore.lock, as we will do multiple timing critical raw 1006 * register reads, potentially with preemption disabled, so the 1007 * following code must not block on uncore.lock. 1008 */ 1009 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1010 1011 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 1012 1013 /* Get optional system timestamp before query. */ 1014 if (stime) 1015 *stime = ktime_get(); 1016 1017 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 1018 /* No obvious pixelcount register. Only query vertical 1019 * scanout position from Display scan line register. 1020 */ 1021 position = __intel_get_crtc_scanline(intel_crtc); 1022 } else { 1023 /* Have access to pixelcount since start of frame. 1024 * We can split this into vertical and horizontal 1025 * scanout position. 1026 */ 1027 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 1028 1029 /* convert to pixel counts */ 1030 vbl_start *= htotal; 1031 vbl_end *= htotal; 1032 vtotal *= htotal; 1033 1034 /* 1035 * In interlaced modes, the pixel counter counts all pixels, 1036 * so one field will have htotal more pixels. In order to avoid 1037 * the reported position from jumping backwards when the pixel 1038 * counter is beyond the length of the shorter field, just 1039 * clamp the position the length of the shorter field. This 1040 * matches how the scanline counter based position works since 1041 * the scanline counter doesn't count the two half lines. 1042 */ 1043 if (position >= vtotal) 1044 position = vtotal - 1; 1045 1046 /* 1047 * Start of vblank interrupt is triggered at start of hsync, 1048 * just prior to the first active line of vblank. However we 1049 * consider lines to start at the leading edge of horizontal 1050 * active. So, should we get here before we've crossed into 1051 * the horizontal active of the first line in vblank, we would 1052 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 1053 * always add htotal-hsync_start to the current pixel position. 1054 */ 1055 position = (position + htotal - hsync_start) % vtotal; 1056 } 1057 1058 /* Get optional system timestamp after query. */ 1059 if (etime) 1060 *etime = ktime_get(); 1061 1062 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 1063 1064 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1065 1066 /* 1067 * While in vblank, position will be negative 1068 * counting up towards 0 at vbl_end. 
And outside 1069 * vblank, position will be positive counting 1070 * up since vbl_end. 1071 */ 1072 if (position >= vbl_start) 1073 position -= vbl_end; 1074 else 1075 position += vtotal - vbl_end; 1076 1077 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 1078 *vpos = position; 1079 *hpos = 0; 1080 } else { 1081 *vpos = position / htotal; 1082 *hpos = position - (*vpos * htotal); 1083 } 1084 1085 return true; 1086 } 1087 1088 int intel_get_crtc_scanline(struct intel_crtc *crtc) 1089 { 1090 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1091 unsigned long irqflags; 1092 int position; 1093 1094 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1095 position = __intel_get_crtc_scanline(crtc); 1096 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1097 1098 return position; 1099 } 1100 1101 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1102 { 1103 u32 busy_up, busy_down, max_avg, min_avg; 1104 u8 new_delay; 1105 1106 spin_lock(&mchdev_lock); 1107 1108 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1109 1110 new_delay = dev_priv->ips.cur_delay; 1111 1112 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1113 busy_up = I915_READ(RCPREVBSYTUPAVG); 1114 busy_down = I915_READ(RCPREVBSYTDNAVG); 1115 max_avg = I915_READ(RCBMAXAVG); 1116 min_avg = I915_READ(RCBMINAVG); 1117 1118 /* Handle RCS change request from hw */ 1119 if (busy_up > max_avg) { 1120 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1121 new_delay = dev_priv->ips.cur_delay - 1; 1122 if (new_delay < dev_priv->ips.max_delay) 1123 new_delay = dev_priv->ips.max_delay; 1124 } else if (busy_down < min_avg) { 1125 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1126 new_delay = dev_priv->ips.cur_delay + 1; 1127 if (new_delay > dev_priv->ips.min_delay) 1128 new_delay = dev_priv->ips.min_delay; 1129 } 1130 1131 if (ironlake_set_drps(dev_priv, new_delay)) 1132 dev_priv->ips.cur_delay = new_delay; 1133 1134 spin_unlock(&mchdev_lock); 1135 1136 return; 1137 } 1138 1139 static void notify_ring(struct intel_engine_cs *engine) 1140 { 1141 struct i915_request *rq = NULL; 1142 struct intel_wait *wait; 1143 1144 if (!engine->breadcrumbs.irq_armed) 1145 return; 1146 1147 atomic_inc(&engine->irq_count); 1148 set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); 1149 1150 spin_lock(&engine->breadcrumbs.irq_lock); 1151 wait = engine->breadcrumbs.irq_wait; 1152 if (wait) { 1153 bool wakeup = engine->irq_seqno_barrier; 1154 1155 /* We use a callback from the dma-fence to submit 1156 * requests after waiting on our own requests. To 1157 * ensure minimum delay in queuing the next request to 1158 * hardware, signal the fence now rather than wait for 1159 * the signaler to be woken up. We still wake up the 1160 * waiter in order to handle the irq-seqno coherency 1161 * issues (we may receive the interrupt before the 1162 * seqno is written, see __i915_request_irq_complete()) 1163 * and to handle coalescing of multiple seqno updates 1164 * and many waiters. 
1165 */ 1166 if (i915_seqno_passed(intel_engine_get_seqno(engine), 1167 wait->seqno)) { 1168 struct i915_request *waiter = wait->request; 1169 1170 wakeup = true; 1171 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1172 &waiter->fence.flags) && 1173 intel_wait_check_request(wait, waiter)) 1174 rq = i915_request_get(waiter); 1175 } 1176 1177 if (wakeup) 1178 wake_up_process(wait->tsk); 1179 } else { 1180 if (engine->breadcrumbs.irq_armed) 1181 __intel_engine_disarm_breadcrumbs(engine); 1182 } 1183 spin_unlock(&engine->breadcrumbs.irq_lock); 1184 1185 if (rq) { 1186 dma_fence_signal(&rq->fence); 1187 GEM_BUG_ON(!i915_request_completed(rq)); 1188 i915_request_put(rq); 1189 } 1190 1191 trace_intel_engine_notify(engine, wait); 1192 } 1193 1194 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1195 struct intel_rps_ei *ei) 1196 { 1197 ei->ktime = ktime_get_raw(); 1198 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1199 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1200 } 1201 1202 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1203 { 1204 memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 1205 } 1206 1207 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1208 { 1209 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1210 const struct intel_rps_ei *prev = &rps->ei; 1211 struct intel_rps_ei now; 1212 u32 events = 0; 1213 1214 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1215 return 0; 1216 1217 vlv_c0_read(dev_priv, &now); 1218 1219 if (prev->ktime) { 1220 u64 time, c0; 1221 u32 render, media; 1222 1223 time = ktime_us_delta(now.ktime, prev->ktime); 1224 1225 time *= dev_priv->czclk_freq; 1226 1227 /* Workload can be split between render + media, 1228 * e.g. SwapBuffers being blitted in X after being rendered in 1229 * mesa. To account for this we need to combine both engines 1230 * into our activity counter. 1231 */ 1232 render = now.render_c0 - prev->render_c0; 1233 media = now.media_c0 - prev->media_c0; 1234 c0 = max(render, media); 1235 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1236 1237 if (c0 > time * rps->up_threshold) 1238 events = GEN6_PM_RP_UP_THRESHOLD; 1239 else if (c0 < time * rps->down_threshold) 1240 events = GEN6_PM_RP_DOWN_THRESHOLD; 1241 } 1242 1243 rps->ei = now; 1244 return events; 1245 } 1246 1247 static void gen6_pm_rps_work(struct work_struct *work) 1248 { 1249 struct drm_i915_private *dev_priv = 1250 container_of(work, struct drm_i915_private, gt_pm.rps.work); 1251 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1252 bool client_boost = false; 1253 int new_delay, adj, min, max; 1254 u32 pm_iir = 0; 1255 1256 spin_lock_irq(&dev_priv->irq_lock); 1257 if (rps->interrupts_enabled) { 1258 pm_iir = fetch_and_zero(&rps->pm_iir); 1259 client_boost = atomic_read(&rps->num_waiters); 1260 } 1261 spin_unlock_irq(&dev_priv->irq_lock); 1262 1263 /* Make sure we didn't queue anything we're not going to process. 
*/ 1264 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1265 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1266 goto out; 1267 1268 mutex_lock(&dev_priv->pcu_lock); 1269 1270 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1271 1272 adj = rps->last_adj; 1273 new_delay = rps->cur_freq; 1274 min = rps->min_freq_softlimit; 1275 max = rps->max_freq_softlimit; 1276 if (client_boost) 1277 max = rps->max_freq; 1278 if (client_boost && new_delay < rps->boost_freq) { 1279 new_delay = rps->boost_freq; 1280 adj = 0; 1281 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1282 if (adj > 0) 1283 adj *= 2; 1284 else /* CHV needs even encode values */ 1285 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1286 1287 if (new_delay >= rps->max_freq_softlimit) 1288 adj = 0; 1289 } else if (client_boost) { 1290 adj = 0; 1291 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1292 if (rps->cur_freq > rps->efficient_freq) 1293 new_delay = rps->efficient_freq; 1294 else if (rps->cur_freq > rps->min_freq_softlimit) 1295 new_delay = rps->min_freq_softlimit; 1296 adj = 0; 1297 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1298 if (adj < 0) 1299 adj *= 2; 1300 else /* CHV needs even encode values */ 1301 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1302 1303 if (new_delay <= rps->min_freq_softlimit) 1304 adj = 0; 1305 } else { /* unknown event */ 1306 adj = 0; 1307 } 1308 1309 rps->last_adj = adj; 1310 1311 /* sysfs frequency interfaces may have snuck in while servicing the 1312 * interrupt 1313 */ 1314 new_delay += adj; 1315 new_delay = clamp_t(int, new_delay, min, max); 1316 1317 if (intel_set_rps(dev_priv, new_delay)) { 1318 DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1319 rps->last_adj = 0; 1320 } 1321 1322 mutex_unlock(&dev_priv->pcu_lock); 1323 1324 out: 1325 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1326 spin_lock_irq(&dev_priv->irq_lock); 1327 if (rps->interrupts_enabled) 1328 gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 1329 spin_unlock_irq(&dev_priv->irq_lock); 1330 } 1331 1332 1333 /** 1334 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1335 * occurred. 1336 * @work: workqueue struct 1337 * 1338 * Doesn't actually do anything except notify userspace. As a consequence of 1339 * this event, userspace should try to remap the bad rows since statistically 1340 * it is likely the same row is more likely to go bad again. 1341 */ 1342 static void ivybridge_parity_work(struct work_struct *work) 1343 { 1344 struct drm_i915_private *dev_priv = 1345 container_of(work, typeof(*dev_priv), l3_parity.error_work); 1346 u32 error_status, row, bank, subbank; 1347 char *parity_event[6]; 1348 uint32_t misccpctl; 1349 uint8_t slice = 0; 1350 1351 /* We must turn off DOP level clock gating to access the L3 registers. 1352 * In order to prevent a get/put style interface, acquire struct mutex 1353 * any time we access those registers. 
1354 */ 1355 mutex_lock(&dev_priv->drm.struct_mutex); 1356 1357 /* If we've screwed up tracking, just let the interrupt fire again */ 1358 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1359 goto out; 1360 1361 misccpctl = I915_READ(GEN7_MISCCPCTL); 1362 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1363 POSTING_READ(GEN7_MISCCPCTL); 1364 1365 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1366 i915_reg_t reg; 1367 1368 slice--; 1369 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1370 break; 1371 1372 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1373 1374 reg = GEN7_L3CDERRST1(slice); 1375 1376 error_status = I915_READ(reg); 1377 row = GEN7_PARITY_ERROR_ROW(error_status); 1378 bank = GEN7_PARITY_ERROR_BANK(error_status); 1379 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1380 1381 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1382 POSTING_READ(reg); 1383 1384 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1385 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1386 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1387 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1388 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1389 parity_event[5] = NULL; 1390 1391 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1392 KOBJ_CHANGE, parity_event); 1393 1394 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1395 slice, row, bank, subbank); 1396 1397 kfree(parity_event[4]); 1398 kfree(parity_event[3]); 1399 kfree(parity_event[2]); 1400 kfree(parity_event[1]); 1401 } 1402 1403 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1404 1405 out: 1406 WARN_ON(dev_priv->l3_parity.which_slice); 1407 spin_lock_irq(&dev_priv->irq_lock); 1408 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1409 spin_unlock_irq(&dev_priv->irq_lock); 1410 1411 mutex_unlock(&dev_priv->drm.struct_mutex); 1412 } 1413 1414 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1415 u32 iir) 1416 { 1417 if (!HAS_L3_DPF(dev_priv)) 1418 return; 1419 1420 spin_lock(&dev_priv->irq_lock); 1421 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1422 spin_unlock(&dev_priv->irq_lock); 1423 1424 iir &= GT_PARITY_ERROR(dev_priv); 1425 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1426 dev_priv->l3_parity.which_slice |= 1 << 1; 1427 1428 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1429 dev_priv->l3_parity.which_slice |= 1 << 0; 1430 1431 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1432 } 1433 1434 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1435 u32 gt_iir) 1436 { 1437 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1438 notify_ring(dev_priv->engine[RCS]); 1439 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1440 notify_ring(dev_priv->engine[VCS]); 1441 } 1442 1443 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1444 u32 gt_iir) 1445 { 1446 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1447 notify_ring(dev_priv->engine[RCS]); 1448 if (gt_iir & GT_BSD_USER_INTERRUPT) 1449 notify_ring(dev_priv->engine[VCS]); 1450 if (gt_iir & GT_BLT_USER_INTERRUPT) 1451 notify_ring(dev_priv->engine[BCS]); 1452 1453 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1454 GT_BSD_CS_ERROR_INTERRUPT | 1455 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1456 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1457 1458 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1459 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1460 } 1461 1462 static void 1463 
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 1464 { 1465 struct intel_engine_execlists * const execlists = &engine->execlists; 1466 bool tasklet = false; 1467 1468 if (iir & GT_CONTEXT_SWITCH_INTERRUPT) { 1469 if (READ_ONCE(engine->execlists.active)) 1470 tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST, 1471 &engine->irq_posted); 1472 } 1473 1474 if (iir & GT_RENDER_USER_INTERRUPT) { 1475 notify_ring(engine); 1476 tasklet |= USES_GUC_SUBMISSION(engine->i915); 1477 } 1478 1479 if (tasklet) 1480 tasklet_hi_schedule(&execlists->tasklet); 1481 } 1482 1483 static void gen8_gt_irq_ack(struct drm_i915_private *i915, 1484 u32 master_ctl, u32 gt_iir[4]) 1485 { 1486 void __iomem * const regs = i915->regs; 1487 1488 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1489 GEN8_GT_BCS_IRQ | \ 1490 GEN8_GT_VCS1_IRQ | \ 1491 GEN8_GT_VCS2_IRQ | \ 1492 GEN8_GT_VECS_IRQ | \ 1493 GEN8_GT_PM_IRQ | \ 1494 GEN8_GT_GUC_IRQ) 1495 1496 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1497 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 1498 if (likely(gt_iir[0])) 1499 raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1500 } 1501 1502 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1503 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 1504 if (likely(gt_iir[1])) 1505 raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 1506 } 1507 1508 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1509 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 1510 if (likely(gt_iir[2] & (i915->pm_rps_events | 1511 i915->pm_guc_events))) 1512 raw_reg_write(regs, GEN8_GT_IIR(2), 1513 gt_iir[2] & (i915->pm_rps_events | 1514 i915->pm_guc_events)); 1515 } 1516 1517 if (master_ctl & GEN8_GT_VECS_IRQ) { 1518 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 1519 if (likely(gt_iir[3])) 1520 raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 1521 } 1522 } 1523 1524 static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1525 u32 master_ctl, u32 gt_iir[4]) 1526 { 1527 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1528 gen8_cs_irq_handler(i915->engine[RCS], 1529 gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); 1530 gen8_cs_irq_handler(i915->engine[BCS], 1531 gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); 1532 } 1533 1534 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1535 gen8_cs_irq_handler(i915->engine[VCS], 1536 gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); 1537 gen8_cs_irq_handler(i915->engine[VCS2], 1538 gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); 1539 } 1540 1541 if (master_ctl & GEN8_GT_VECS_IRQ) { 1542 gen8_cs_irq_handler(i915->engine[VECS], 1543 gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); 1544 } 1545 1546 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1547 gen6_rps_irq_handler(i915, gt_iir[2]); 1548 gen9_guc_irq_handler(i915, gt_iir[2]); 1549 } 1550 } 1551 1552 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1553 { 1554 switch (port) { 1555 case PORT_A: 1556 return val & PORTA_HOTPLUG_LONG_DETECT; 1557 case PORT_B: 1558 return val & PORTB_HOTPLUG_LONG_DETECT; 1559 case PORT_C: 1560 return val & PORTC_HOTPLUG_LONG_DETECT; 1561 default: 1562 return false; 1563 } 1564 } 1565 1566 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1567 { 1568 switch (port) { 1569 case PORT_E: 1570 return val & PORTE_HOTPLUG_LONG_DETECT; 1571 default: 1572 return false; 1573 } 1574 } 1575 1576 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1577 { 1578 switch (port) { 1579 case PORT_A: 1580 return val & PORTA_HOTPLUG_LONG_DETECT; 1581 case PORT_B: 1582 return val & PORTB_HOTPLUG_LONG_DETECT; 1583 case PORT_C: 1584 
return val & PORTC_HOTPLUG_LONG_DETECT; 1585 case PORT_D: 1586 return val & PORTD_HOTPLUG_LONG_DETECT; 1587 default: 1588 return false; 1589 } 1590 } 1591 1592 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1593 { 1594 switch (port) { 1595 case PORT_A: 1596 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1597 default: 1598 return false; 1599 } 1600 } 1601 1602 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1603 { 1604 switch (port) { 1605 case PORT_B: 1606 return val & PORTB_HOTPLUG_LONG_DETECT; 1607 case PORT_C: 1608 return val & PORTC_HOTPLUG_LONG_DETECT; 1609 case PORT_D: 1610 return val & PORTD_HOTPLUG_LONG_DETECT; 1611 default: 1612 return false; 1613 } 1614 } 1615 1616 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1617 { 1618 switch (port) { 1619 case PORT_B: 1620 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1621 case PORT_C: 1622 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1623 case PORT_D: 1624 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1625 default: 1626 return false; 1627 } 1628 } 1629 1630 /* 1631 * Get a bit mask of pins that have triggered, and which ones may be long. 1632 * This can be called multiple times with the same masks to accumulate 1633 * hotplug detection results from several registers. 1634 * 1635 * Note that the caller is expected to zero out the masks initially. 1636 */ 1637 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1638 u32 *pin_mask, u32 *long_mask, 1639 u32 hotplug_trigger, u32 dig_hotplug_reg, 1640 const u32 hpd[HPD_NUM_PINS], 1641 bool long_pulse_detect(enum port port, u32 val)) 1642 { 1643 enum port port; 1644 int i; 1645 1646 for_each_hpd_pin(i) { 1647 if ((hpd[i] & hotplug_trigger) == 0) 1648 continue; 1649 1650 *pin_mask |= BIT(i); 1651 1652 port = intel_hpd_pin_to_port(dev_priv, i); 1653 if (port == PORT_NONE) 1654 continue; 1655 1656 if (long_pulse_detect(port, dig_hotplug_reg)) 1657 *long_mask |= BIT(i); 1658 } 1659 1660 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1661 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1662 1663 } 1664 1665 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1666 { 1667 wake_up_all(&dev_priv->gmbus_wait_queue); 1668 } 1669 1670 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1671 { 1672 wake_up_all(&dev_priv->gmbus_wait_queue); 1673 } 1674 1675 #if defined(CONFIG_DEBUG_FS) 1676 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1677 enum pipe pipe, 1678 uint32_t crc0, uint32_t crc1, 1679 uint32_t crc2, uint32_t crc3, 1680 uint32_t crc4) 1681 { 1682 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1683 struct intel_pipe_crc_entry *entry; 1684 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1685 struct drm_driver *driver = dev_priv->drm.driver; 1686 uint32_t crcs[5]; 1687 int head, tail; 1688 1689 spin_lock(&pipe_crc->lock); 1690 if (pipe_crc->source && !crtc->base.crc.opened) { 1691 if (!pipe_crc->entries) { 1692 spin_unlock(&pipe_crc->lock); 1693 DRM_DEBUG_KMS("spurious interrupt\n"); 1694 return; 1695 } 1696 1697 head = pipe_crc->head; 1698 tail = pipe_crc->tail; 1699 1700 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1701 spin_unlock(&pipe_crc->lock); 1702 DRM_ERROR("CRC buffer overflowing\n"); 1703 return; 1704 } 1705 1706 entry = &pipe_crc->entries[head]; 1707 1708 entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe); 1709 entry->crc[0] = crc0; 1710 entry->crc[1] = crc1; 1711 entry->crc[2] = 
crc2; 1712 entry->crc[3] = crc3; 1713 entry->crc[4] = crc4; 1714 1715 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1716 pipe_crc->head = head; 1717 1718 spin_unlock(&pipe_crc->lock); 1719 1720 wake_up_interruptible(&pipe_crc->wq); 1721 } else { 1722 /* 1723 * For some not yet identified reason, the first CRC is 1724 * bonkers. So let's just wait for the next vblank and read 1725 * out the buggy result. 1726 * 1727 * On GEN8+ sometimes the second CRC is bonkers as well, so 1728 * don't trust that one either. 1729 */ 1730 if (pipe_crc->skipped <= 0 || 1731 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 1732 pipe_crc->skipped++; 1733 spin_unlock(&pipe_crc->lock); 1734 return; 1735 } 1736 spin_unlock(&pipe_crc->lock); 1737 crcs[0] = crc0; 1738 crcs[1] = crc1; 1739 crcs[2] = crc2; 1740 crcs[3] = crc3; 1741 crcs[4] = crc4; 1742 drm_crtc_add_crc_entry(&crtc->base, true, 1743 drm_crtc_accurate_vblank_count(&crtc->base), 1744 crcs); 1745 } 1746 } 1747 #else 1748 static inline void 1749 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1750 enum pipe pipe, 1751 uint32_t crc0, uint32_t crc1, 1752 uint32_t crc2, uint32_t crc3, 1753 uint32_t crc4) {} 1754 #endif 1755 1756 1757 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1758 enum pipe pipe) 1759 { 1760 display_pipe_crc_irq_handler(dev_priv, pipe, 1761 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1762 0, 0, 0, 0); 1763 } 1764 1765 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1766 enum pipe pipe) 1767 { 1768 display_pipe_crc_irq_handler(dev_priv, pipe, 1769 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1770 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1771 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1772 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1773 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1774 } 1775 1776 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1777 enum pipe pipe) 1778 { 1779 uint32_t res1, res2; 1780 1781 if (INTEL_GEN(dev_priv) >= 3) 1782 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1783 else 1784 res1 = 0; 1785 1786 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1787 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1788 else 1789 res2 = 0; 1790 1791 display_pipe_crc_irq_handler(dev_priv, pipe, 1792 I915_READ(PIPE_CRC_RES_RED(pipe)), 1793 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1794 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1795 res1, res2); 1796 } 1797 1798 /* The RPS events need forcewake, so we add them to a work queue and mask their 1799 * IMR bits until the work is done. Other interrupts can be processed without 1800 * the work queue. 
*/ 1801 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1802 { 1803 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1804 1805 if (pm_iir & dev_priv->pm_rps_events) { 1806 spin_lock(&dev_priv->irq_lock); 1807 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1808 if (rps->interrupts_enabled) { 1809 rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; 1810 schedule_work(&rps->work); 1811 } 1812 spin_unlock(&dev_priv->irq_lock); 1813 } 1814 1815 if (INTEL_GEN(dev_priv) >= 8) 1816 return; 1817 1818 if (HAS_VEBOX(dev_priv)) { 1819 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1820 notify_ring(dev_priv->engine[VECS]); 1821 1822 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1823 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1824 } 1825 } 1826 1827 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) 1828 { 1829 if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) 1830 intel_guc_to_host_event_handler(&dev_priv->guc); 1831 } 1832 1833 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 1834 { 1835 enum pipe pipe; 1836 1837 for_each_pipe(dev_priv, pipe) { 1838 I915_WRITE(PIPESTAT(pipe), 1839 PIPESTAT_INT_STATUS_MASK | 1840 PIPE_FIFO_UNDERRUN_STATUS); 1841 1842 dev_priv->pipestat_irq_mask[pipe] = 0; 1843 } 1844 } 1845 1846 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1847 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1848 { 1849 int pipe; 1850 1851 spin_lock(&dev_priv->irq_lock); 1852 1853 if (!dev_priv->display_irqs_enabled) { 1854 spin_unlock(&dev_priv->irq_lock); 1855 return; 1856 } 1857 1858 for_each_pipe(dev_priv, pipe) { 1859 i915_reg_t reg; 1860 u32 status_mask, enable_mask, iir_bit = 0; 1861 1862 /* 1863 * PIPESTAT bits get signalled even when the interrupt is 1864 * disabled with the mask bits, and some of the status bits do 1865 * not generate interrupts at all (like the underrun bit). Hence 1866 * we need to be careful that we only handle what we want to 1867 * handle. 1868 */ 1869 1870 /* fifo underruns are filterered in the underrun handler. */ 1871 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1872 1873 switch (pipe) { 1874 case PIPE_A: 1875 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1876 break; 1877 case PIPE_B: 1878 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1879 break; 1880 case PIPE_C: 1881 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1882 break; 1883 } 1884 if (iir & iir_bit) 1885 status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1886 1887 if (!status_mask) 1888 continue; 1889 1890 reg = PIPESTAT(pipe); 1891 pipe_stats[pipe] = I915_READ(reg) & status_mask; 1892 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 1893 1894 /* 1895 * Clear the PIPE*STAT regs before the IIR 1896 * 1897 * Toggle the enable bits to make sure we get an 1898 * edge in the ISR pipe event bit if we don't clear 1899 * all the enabled status bits. Otherwise the edge 1900 * triggered IIR on i965/g4x wouldn't notice that 1901 * an interrupt is still pending. 
1902 */ 1903 if (pipe_stats[pipe]) { 1904 I915_WRITE(reg, pipe_stats[pipe]); 1905 I915_WRITE(reg, enable_mask); 1906 } 1907 } 1908 spin_unlock(&dev_priv->irq_lock); 1909 } 1910 1911 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1912 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1913 { 1914 enum pipe pipe; 1915 1916 for_each_pipe(dev_priv, pipe) { 1917 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1918 drm_handle_vblank(&dev_priv->drm, pipe); 1919 1920 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1921 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1922 1923 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1924 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1925 } 1926 } 1927 1928 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1929 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1930 { 1931 bool blc_event = false; 1932 enum pipe pipe; 1933 1934 for_each_pipe(dev_priv, pipe) { 1935 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1936 drm_handle_vblank(&dev_priv->drm, pipe); 1937 1938 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1939 blc_event = true; 1940 1941 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1942 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1943 1944 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1945 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1946 } 1947 1948 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1949 intel_opregion_asle_intr(dev_priv); 1950 } 1951 1952 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1953 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1954 { 1955 bool blc_event = false; 1956 enum pipe pipe; 1957 1958 for_each_pipe(dev_priv, pipe) { 1959 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1960 drm_handle_vblank(&dev_priv->drm, pipe); 1961 1962 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1963 blc_event = true; 1964 1965 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1966 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1967 1968 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1969 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1970 } 1971 1972 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1973 intel_opregion_asle_intr(dev_priv); 1974 1975 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1976 gmbus_irq_handler(dev_priv); 1977 } 1978 1979 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1980 u32 pipe_stats[I915_MAX_PIPES]) 1981 { 1982 enum pipe pipe; 1983 1984 for_each_pipe(dev_priv, pipe) { 1985 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1986 drm_handle_vblank(&dev_priv->drm, pipe); 1987 1988 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1989 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1990 1991 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1992 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1993 } 1994 1995 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1996 gmbus_irq_handler(dev_priv); 1997 } 1998 1999 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 2000 { 2001 u32 hotplug_status = 0, hotplug_status_mask; 2002 int i; 2003 2004 if (IS_G4X(dev_priv) || 2005 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2006 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 2007 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 2008 else 2009 hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 2010 2011 /* 2012 * We absolutely have to clear all the pending interrupt 2013 * bits in PORT_HOTPLUG_STAT. 
Otherwise the ISR port 2014 * interrupt bit won't have an edge, and the i965/g4x 2015 * edge triggered IIR will not notice that an interrupt 2016 * is still pending. We can't use PORT_HOTPLUG_EN to 2017 * guarantee the edge as the act of toggling the enable 2018 * bits can itself generate a new hotplug interrupt :( 2019 */ 2020 for (i = 0; i < 10; i++) { 2021 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 2022 2023 if (tmp == 0) 2024 return hotplug_status; 2025 2026 hotplug_status |= tmp; 2027 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2028 } 2029 2030 WARN_ONCE(1, 2031 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 2032 I915_READ(PORT_HOTPLUG_STAT)); 2033 2034 return hotplug_status; 2035 } 2036 2037 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2038 u32 hotplug_status) 2039 { 2040 u32 pin_mask = 0, long_mask = 0; 2041 2042 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2043 IS_CHERRYVIEW(dev_priv)) { 2044 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2045 2046 if (hotplug_trigger) { 2047 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2048 hotplug_trigger, hotplug_trigger, 2049 hpd_status_g4x, 2050 i9xx_port_hotplug_long_detect); 2051 2052 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2053 } 2054 2055 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 2056 dp_aux_irq_handler(dev_priv); 2057 } else { 2058 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2059 2060 if (hotplug_trigger) { 2061 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2062 hotplug_trigger, hotplug_trigger, 2063 hpd_status_i915, 2064 i9xx_port_hotplug_long_detect); 2065 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2066 } 2067 } 2068 } 2069 2070 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2071 { 2072 struct drm_device *dev = arg; 2073 struct drm_i915_private *dev_priv = to_i915(dev); 2074 irqreturn_t ret = IRQ_NONE; 2075 2076 if (!intel_irqs_enabled(dev_priv)) 2077 return IRQ_NONE; 2078 2079 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2080 disable_rpm_wakeref_asserts(dev_priv); 2081 2082 do { 2083 u32 iir, gt_iir, pm_iir; 2084 u32 pipe_stats[I915_MAX_PIPES] = {}; 2085 u32 hotplug_status = 0; 2086 u32 ier = 0; 2087 2088 gt_iir = I915_READ(GTIIR); 2089 pm_iir = I915_READ(GEN6_PMIIR); 2090 iir = I915_READ(VLV_IIR); 2091 2092 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2093 break; 2094 2095 ret = IRQ_HANDLED; 2096 2097 /* 2098 * Theory on interrupt generation, based on empirical evidence: 2099 * 2100 * x = ((VLV_IIR & VLV_IER) || 2101 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2102 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2103 * 2104 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2105 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2106 * guarantee the CPU interrupt will be raised again even if we 2107 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2108 * bits this time around. 
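 * Hence the sequence below: clear VLV_MASTER_IER and VLV_IER, ack the IIR
 * registers, restore IER and the master enable, and only then run the
 * individual handlers; anything that arrived in between raises a new edge.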
2109 */ 2110 I915_WRITE(VLV_MASTER_IER, 0); 2111 ier = I915_READ(VLV_IER); 2112 I915_WRITE(VLV_IER, 0); 2113 2114 if (gt_iir) 2115 I915_WRITE(GTIIR, gt_iir); 2116 if (pm_iir) 2117 I915_WRITE(GEN6_PMIIR, pm_iir); 2118 2119 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2120 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2121 2122 /* Call regardless, as some status bits might not be 2123 * signalled in iir */ 2124 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2125 2126 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2127 I915_LPE_PIPE_B_INTERRUPT)) 2128 intel_lpe_audio_irq_handler(dev_priv); 2129 2130 /* 2131 * VLV_IIR is single buffered, and reflects the level 2132 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2133 */ 2134 if (iir) 2135 I915_WRITE(VLV_IIR, iir); 2136 2137 I915_WRITE(VLV_IER, ier); 2138 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2139 POSTING_READ(VLV_MASTER_IER); 2140 2141 if (gt_iir) 2142 snb_gt_irq_handler(dev_priv, gt_iir); 2143 if (pm_iir) 2144 gen6_rps_irq_handler(dev_priv, pm_iir); 2145 2146 if (hotplug_status) 2147 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2148 2149 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2150 } while (0); 2151 2152 enable_rpm_wakeref_asserts(dev_priv); 2153 2154 return ret; 2155 } 2156 2157 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2158 { 2159 struct drm_device *dev = arg; 2160 struct drm_i915_private *dev_priv = to_i915(dev); 2161 irqreturn_t ret = IRQ_NONE; 2162 2163 if (!intel_irqs_enabled(dev_priv)) 2164 return IRQ_NONE; 2165 2166 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2167 disable_rpm_wakeref_asserts(dev_priv); 2168 2169 do { 2170 u32 master_ctl, iir; 2171 u32 pipe_stats[I915_MAX_PIPES] = {}; 2172 u32 hotplug_status = 0; 2173 u32 gt_iir[4]; 2174 u32 ier = 0; 2175 2176 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2177 iir = I915_READ(VLV_IIR); 2178 2179 if (master_ctl == 0 && iir == 0) 2180 break; 2181 2182 ret = IRQ_HANDLED; 2183 2184 /* 2185 * Theory on interrupt generation, based on empirical evidence: 2186 * 2187 * x = ((VLV_IIR & VLV_IER) || 2188 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2189 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2190 * 2191 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2192 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2193 * guarantee the CPU interrupt will be raised again even if we 2194 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2195 * bits this time around. 2196 */ 2197 I915_WRITE(GEN8_MASTER_IRQ, 0); 2198 ier = I915_READ(VLV_IER); 2199 I915_WRITE(VLV_IER, 0); 2200 2201 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2202 2203 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2204 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2205 2206 /* Call regardless, as some status bits might not be 2207 * signalled in iir */ 2208 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2209 2210 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2211 I915_LPE_PIPE_B_INTERRUPT | 2212 I915_LPE_PIPE_C_INTERRUPT)) 2213 intel_lpe_audio_irq_handler(dev_priv); 2214 2215 /* 2216 * VLV_IIR is single buffered, and reflects the level 2217 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
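 * (i9xx_hpd_irq_ack() and i9xx_pipestat_irq_ack() above have already cleared
 * the underlying PORT_HOTPLUG_STAT/PIPESTAT bits, so the write below is not
 * immediately re-latched by a still-asserted status bit.)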
2218 */ 2219 if (iir) 2220 I915_WRITE(VLV_IIR, iir); 2221 2222 I915_WRITE(VLV_IER, ier); 2223 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2224 POSTING_READ(GEN8_MASTER_IRQ); 2225 2226 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2227 2228 if (hotplug_status) 2229 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2230 2231 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2232 } while (0); 2233 2234 enable_rpm_wakeref_asserts(dev_priv); 2235 2236 return ret; 2237 } 2238 2239 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2240 u32 hotplug_trigger, 2241 const u32 hpd[HPD_NUM_PINS]) 2242 { 2243 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2244 2245 /* 2246 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2247 * unless we touch the hotplug register, even if hotplug_trigger is 2248 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2249 * errors. 2250 */ 2251 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2252 if (!hotplug_trigger) { 2253 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2254 PORTD_HOTPLUG_STATUS_MASK | 2255 PORTC_HOTPLUG_STATUS_MASK | 2256 PORTB_HOTPLUG_STATUS_MASK; 2257 dig_hotplug_reg &= ~mask; 2258 } 2259 2260 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2261 if (!hotplug_trigger) 2262 return; 2263 2264 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2265 dig_hotplug_reg, hpd, 2266 pch_port_hotplug_long_detect); 2267 2268 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2269 } 2270 2271 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2272 { 2273 int pipe; 2274 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2275 2276 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2277 2278 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2279 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2280 SDE_AUDIO_POWER_SHIFT); 2281 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2282 port_name(port)); 2283 } 2284 2285 if (pch_iir & SDE_AUX_MASK) 2286 dp_aux_irq_handler(dev_priv); 2287 2288 if (pch_iir & SDE_GMBUS) 2289 gmbus_irq_handler(dev_priv); 2290 2291 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2292 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2293 2294 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2295 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2296 2297 if (pch_iir & SDE_POISON) 2298 DRM_ERROR("PCH poison interrupt\n"); 2299 2300 if (pch_iir & SDE_FDI_MASK) 2301 for_each_pipe(dev_priv, pipe) 2302 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2303 pipe_name(pipe), 2304 I915_READ(FDI_RX_IIR(pipe))); 2305 2306 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2307 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2308 2309 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2310 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2311 2312 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2313 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2314 2315 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2316 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2317 } 2318 2319 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2320 { 2321 u32 err_int = I915_READ(GEN7_ERR_INT); 2322 enum pipe pipe; 2323 2324 if (err_int & ERR_INT_POISON) 2325 DRM_ERROR("Poison interrupt\n"); 2326 2327 for_each_pipe(dev_priv, pipe) { 2328 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2329 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2330 2331 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2332 if (IS_IVYBRIDGE(dev_priv)) 2333 
ivb_pipe_crc_irq_handler(dev_priv, pipe); 2334 else 2335 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2336 } 2337 } 2338 2339 I915_WRITE(GEN7_ERR_INT, err_int); 2340 } 2341 2342 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2343 { 2344 u32 serr_int = I915_READ(SERR_INT); 2345 enum pipe pipe; 2346 2347 if (serr_int & SERR_INT_POISON) 2348 DRM_ERROR("PCH poison interrupt\n"); 2349 2350 for_each_pipe(dev_priv, pipe) 2351 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2352 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2353 2354 I915_WRITE(SERR_INT, serr_int); 2355 } 2356 2357 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2358 { 2359 int pipe; 2360 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2361 2362 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2363 2364 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2365 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2366 SDE_AUDIO_POWER_SHIFT_CPT); 2367 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2368 port_name(port)); 2369 } 2370 2371 if (pch_iir & SDE_AUX_MASK_CPT) 2372 dp_aux_irq_handler(dev_priv); 2373 2374 if (pch_iir & SDE_GMBUS_CPT) 2375 gmbus_irq_handler(dev_priv); 2376 2377 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2378 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2379 2380 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2381 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2382 2383 if (pch_iir & SDE_FDI_MASK_CPT) 2384 for_each_pipe(dev_priv, pipe) 2385 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2386 pipe_name(pipe), 2387 I915_READ(FDI_RX_IIR(pipe))); 2388 2389 if (pch_iir & SDE_ERROR_CPT) 2390 cpt_serr_int_handler(dev_priv); 2391 } 2392 2393 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2394 { 2395 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2396 ~SDE_PORTE_HOTPLUG_SPT; 2397 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2398 u32 pin_mask = 0, long_mask = 0; 2399 2400 if (hotplug_trigger) { 2401 u32 dig_hotplug_reg; 2402 2403 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2404 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2405 2406 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2407 hotplug_trigger, dig_hotplug_reg, hpd_spt, 2408 spt_port_hotplug_long_detect); 2409 } 2410 2411 if (hotplug2_trigger) { 2412 u32 dig_hotplug_reg; 2413 2414 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2415 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2416 2417 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2418 hotplug2_trigger, dig_hotplug_reg, hpd_spt, 2419 spt_port_hotplug2_long_detect); 2420 } 2421 2422 if (pin_mask) 2423 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2424 2425 if (pch_iir & SDE_GMBUS_CPT) 2426 gmbus_irq_handler(dev_priv); 2427 } 2428 2429 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2430 u32 hotplug_trigger, 2431 const u32 hpd[HPD_NUM_PINS]) 2432 { 2433 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2434 2435 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2436 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2437 2438 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2439 dig_hotplug_reg, hpd, 2440 ilk_port_hotplug_long_detect); 2441 2442 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2443 } 2444 2445 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2446 u32 de_iir) 2447 { 2448 enum pipe pipe; 2449 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2450 2451 if (hotplug_trigger) 2452 
ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2453 2454 if (de_iir & DE_AUX_CHANNEL_A) 2455 dp_aux_irq_handler(dev_priv); 2456 2457 if (de_iir & DE_GSE) 2458 intel_opregion_asle_intr(dev_priv); 2459 2460 if (de_iir & DE_POISON) 2461 DRM_ERROR("Poison interrupt\n"); 2462 2463 for_each_pipe(dev_priv, pipe) { 2464 if (de_iir & DE_PIPE_VBLANK(pipe)) 2465 drm_handle_vblank(&dev_priv->drm, pipe); 2466 2467 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2468 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2469 2470 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2471 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2472 } 2473 2474 /* check event from PCH */ 2475 if (de_iir & DE_PCH_EVENT) { 2476 u32 pch_iir = I915_READ(SDEIIR); 2477 2478 if (HAS_PCH_CPT(dev_priv)) 2479 cpt_irq_handler(dev_priv, pch_iir); 2480 else 2481 ibx_irq_handler(dev_priv, pch_iir); 2482 2483 /* should clear PCH hotplug event before clear CPU irq */ 2484 I915_WRITE(SDEIIR, pch_iir); 2485 } 2486 2487 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2488 ironlake_rps_change_irq_handler(dev_priv); 2489 } 2490 2491 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2492 u32 de_iir) 2493 { 2494 enum pipe pipe; 2495 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2496 2497 if (hotplug_trigger) 2498 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2499 2500 if (de_iir & DE_ERR_INT_IVB) 2501 ivb_err_int_handler(dev_priv); 2502 2503 if (de_iir & DE_EDP_PSR_INT_HSW) { 2504 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2505 2506 intel_psr_irq_handler(dev_priv, psr_iir); 2507 I915_WRITE(EDP_PSR_IIR, psr_iir); 2508 } 2509 2510 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2511 dp_aux_irq_handler(dev_priv); 2512 2513 if (de_iir & DE_GSE_IVB) 2514 intel_opregion_asle_intr(dev_priv); 2515 2516 for_each_pipe(dev_priv, pipe) { 2517 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2518 drm_handle_vblank(&dev_priv->drm, pipe); 2519 } 2520 2521 /* check event from PCH */ 2522 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2523 u32 pch_iir = I915_READ(SDEIIR); 2524 2525 cpt_irq_handler(dev_priv, pch_iir); 2526 2527 /* clear PCH hotplug event before clear CPU irq */ 2528 I915_WRITE(SDEIIR, pch_iir); 2529 } 2530 } 2531 2532 /* 2533 * To handle irqs with the minimum potential races with fresh interrupts, we: 2534 * 1 - Disable Master Interrupt Control. 2535 * 2 - Find the source(s) of the interrupt. 2536 * 3 - Clear the Interrupt Identity bits (IIR). 2537 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2538 * 5 - Re-enable Master Interrupt Control. 2539 */ 2540 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2541 { 2542 struct drm_device *dev = arg; 2543 struct drm_i915_private *dev_priv = to_i915(dev); 2544 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2545 irqreturn_t ret = IRQ_NONE; 2546 2547 if (!intel_irqs_enabled(dev_priv)) 2548 return IRQ_NONE; 2549 2550 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2551 disable_rpm_wakeref_asserts(dev_priv); 2552 2553 /* disable master interrupt before clearing iir */ 2554 de_ier = I915_READ(DEIER); 2555 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2556 POSTING_READ(DEIER); 2557 2558 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2559 * interrupts will be stored on its back queue, and then we'll be 2560 * able to process them after we restore SDEIER (as soon as we restore 2561 * it, we'll get an interrupt if SDEIIR still has something to process 2562 * due to its back queue).
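 * SDEIER is restored near the end of this handler, after SDEIIR has been
 * written back, so any PCH event that arrived in the meantime raises the
 * interrupt again.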
*/ 2563 if (!HAS_PCH_NOP(dev_priv)) { 2564 sde_ier = I915_READ(SDEIER); 2565 I915_WRITE(SDEIER, 0); 2566 POSTING_READ(SDEIER); 2567 } 2568 2569 /* Find, clear, then process each source of interrupt */ 2570 2571 gt_iir = I915_READ(GTIIR); 2572 if (gt_iir) { 2573 I915_WRITE(GTIIR, gt_iir); 2574 ret = IRQ_HANDLED; 2575 if (INTEL_GEN(dev_priv) >= 6) 2576 snb_gt_irq_handler(dev_priv, gt_iir); 2577 else 2578 ilk_gt_irq_handler(dev_priv, gt_iir); 2579 } 2580 2581 de_iir = I915_READ(DEIIR); 2582 if (de_iir) { 2583 I915_WRITE(DEIIR, de_iir); 2584 ret = IRQ_HANDLED; 2585 if (INTEL_GEN(dev_priv) >= 7) 2586 ivb_display_irq_handler(dev_priv, de_iir); 2587 else 2588 ilk_display_irq_handler(dev_priv, de_iir); 2589 } 2590 2591 if (INTEL_GEN(dev_priv) >= 6) { 2592 u32 pm_iir = I915_READ(GEN6_PMIIR); 2593 if (pm_iir) { 2594 I915_WRITE(GEN6_PMIIR, pm_iir); 2595 ret = IRQ_HANDLED; 2596 gen6_rps_irq_handler(dev_priv, pm_iir); 2597 } 2598 } 2599 2600 I915_WRITE(DEIER, de_ier); 2601 POSTING_READ(DEIER); 2602 if (!HAS_PCH_NOP(dev_priv)) { 2603 I915_WRITE(SDEIER, sde_ier); 2604 POSTING_READ(SDEIER); 2605 } 2606 2607 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2608 enable_rpm_wakeref_asserts(dev_priv); 2609 2610 return ret; 2611 } 2612 2613 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2614 u32 hotplug_trigger, 2615 const u32 hpd[HPD_NUM_PINS]) 2616 { 2617 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2618 2619 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2620 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2621 2622 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2623 dig_hotplug_reg, hpd, 2624 bxt_port_hotplug_long_detect); 2625 2626 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2627 } 2628 2629 static irqreturn_t 2630 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2631 { 2632 irqreturn_t ret = IRQ_NONE; 2633 u32 iir; 2634 enum pipe pipe; 2635 2636 if (master_ctl & GEN8_DE_MISC_IRQ) { 2637 iir = I915_READ(GEN8_DE_MISC_IIR); 2638 if (iir) { 2639 bool found = false; 2640 2641 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2642 ret = IRQ_HANDLED; 2643 2644 if (iir & GEN8_DE_MISC_GSE) { 2645 intel_opregion_asle_intr(dev_priv); 2646 found = true; 2647 } 2648 2649 if (iir & GEN8_DE_EDP_PSR) { 2650 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2651 2652 intel_psr_irq_handler(dev_priv, psr_iir); 2653 I915_WRITE(EDP_PSR_IIR, psr_iir); 2654 found = true; 2655 } 2656 2657 if (!found) 2658 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2659 } 2660 else 2661 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2662 } 2663 2664 if (master_ctl & GEN8_DE_PORT_IRQ) { 2665 iir = I915_READ(GEN8_DE_PORT_IIR); 2666 if (iir) { 2667 u32 tmp_mask; 2668 bool found = false; 2669 2670 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2671 ret = IRQ_HANDLED; 2672 2673 tmp_mask = GEN8_AUX_CHANNEL_A; 2674 if (INTEL_GEN(dev_priv) >= 9) 2675 tmp_mask |= GEN9_AUX_CHANNEL_B | 2676 GEN9_AUX_CHANNEL_C | 2677 GEN9_AUX_CHANNEL_D; 2678 2679 if (IS_CNL_WITH_PORT_F(dev_priv)) 2680 tmp_mask |= CNL_AUX_CHANNEL_F; 2681 2682 if (iir & tmp_mask) { 2683 dp_aux_irq_handler(dev_priv); 2684 found = true; 2685 } 2686 2687 if (IS_GEN9_LP(dev_priv)) { 2688 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2689 if (tmp_mask) { 2690 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2691 hpd_bxt); 2692 found = true; 2693 } 2694 } else if (IS_BROADWELL(dev_priv)) { 2695 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2696 if (tmp_mask) { 2697 ilk_hpd_irq_handler(dev_priv, 2698 tmp_mask, hpd_bdw); 2699 found = true; 
2700 } 2701 } 2702 2703 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2704 gmbus_irq_handler(dev_priv); 2705 found = true; 2706 } 2707 2708 if (!found) 2709 DRM_ERROR("Unexpected DE Port interrupt\n"); 2710 } 2711 else 2712 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2713 } 2714 2715 for_each_pipe(dev_priv, pipe) { 2716 u32 fault_errors; 2717 2718 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2719 continue; 2720 2721 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2722 if (!iir) { 2723 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2724 continue; 2725 } 2726 2727 ret = IRQ_HANDLED; 2728 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2729 2730 if (iir & GEN8_PIPE_VBLANK) 2731 drm_handle_vblank(&dev_priv->drm, pipe); 2732 2733 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2734 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2735 2736 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2737 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2738 2739 fault_errors = iir; 2740 if (INTEL_GEN(dev_priv) >= 9) 2741 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2742 else 2743 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2744 2745 if (fault_errors) 2746 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2747 pipe_name(pipe), 2748 fault_errors); 2749 } 2750 2751 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2752 master_ctl & GEN8_DE_PCH_IRQ) { 2753 /* 2754 * FIXME(BDW): Assume for now that the new interrupt handling 2755 * scheme also closed the SDE interrupt handling race we've seen 2756 * on older pch-split platforms. But this needs testing. 2757 */ 2758 iir = I915_READ(SDEIIR); 2759 if (iir) { 2760 I915_WRITE(SDEIIR, iir); 2761 ret = IRQ_HANDLED; 2762 2763 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 2764 HAS_PCH_CNP(dev_priv)) 2765 spt_irq_handler(dev_priv, iir); 2766 else 2767 cpt_irq_handler(dev_priv, iir); 2768 } else { 2769 /* 2770 * Like on previous PCH there seems to be something 2771 * fishy going on with forwarding PCH interrupts. 
2772 */ 2773 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2774 } 2775 } 2776 2777 return ret; 2778 } 2779 2780 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2781 { 2782 struct drm_i915_private *dev_priv = to_i915(arg); 2783 u32 master_ctl; 2784 u32 gt_iir[4]; 2785 2786 if (!intel_irqs_enabled(dev_priv)) 2787 return IRQ_NONE; 2788 2789 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2790 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2791 if (!master_ctl) 2792 return IRQ_NONE; 2793 2794 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2795 2796 /* Find, clear, then process each source of interrupt */ 2797 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2798 2799 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2800 if (master_ctl & ~GEN8_GT_IRQS) { 2801 disable_rpm_wakeref_asserts(dev_priv); 2802 gen8_de_irq_handler(dev_priv, master_ctl); 2803 enable_rpm_wakeref_asserts(dev_priv); 2804 } 2805 2806 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2807 2808 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2809 2810 return IRQ_HANDLED; 2811 } 2812 2813 struct wedge_me { 2814 struct delayed_work work; 2815 struct drm_i915_private *i915; 2816 const char *name; 2817 }; 2818 2819 static void wedge_me(struct work_struct *work) 2820 { 2821 struct wedge_me *w = container_of(work, typeof(*w), work.work); 2822 2823 dev_err(w->i915->drm.dev, 2824 "%s timed out, cancelling all in-flight rendering.\n", 2825 w->name); 2826 i915_gem_set_wedged(w->i915); 2827 } 2828 2829 static void __init_wedge(struct wedge_me *w, 2830 struct drm_i915_private *i915, 2831 long timeout, 2832 const char *name) 2833 { 2834 w->i915 = i915; 2835 w->name = name; 2836 2837 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 2838 schedule_delayed_work(&w->work, timeout); 2839 } 2840 2841 static void __fini_wedge(struct wedge_me *w) 2842 { 2843 cancel_delayed_work_sync(&w->work); 2844 destroy_delayed_work_on_stack(&w->work); 2845 w->i915 = NULL; 2846 } 2847 2848 #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 2849 for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 2850 (W)->i915; \ 2851 __fini_wedge((W))) 2852 2853 static u32 2854 gen11_gt_engine_identity(struct drm_i915_private * const i915, 2855 const unsigned int bank, const unsigned int bit) 2856 { 2857 void __iomem * const regs = i915->regs; 2858 u32 timeout_ts; 2859 u32 ident; 2860 2861 lockdep_assert_held(&i915->irq_lock); 2862 2863 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 2864 2865 /* 2866 * NB: Specs do not specify how long to spin wait, 2867 * so we do ~100us as an educated guess. 
2868 */ 2869 timeout_ts = (local_clock() >> 10) + 100; 2870 do { 2871 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 2872 } while (!(ident & GEN11_INTR_DATA_VALID) && 2873 !time_after32(local_clock() >> 10, timeout_ts)); 2874 2875 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 2876 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 2877 bank, bit, ident); 2878 return 0; 2879 } 2880 2881 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 2882 GEN11_INTR_DATA_VALID); 2883 2884 return ident; 2885 } 2886 2887 static void 2888 gen11_other_irq_handler(struct drm_i915_private * const i915, 2889 const u8 instance, const u16 iir) 2890 { 2891 if (instance == OTHER_GTPM_INSTANCE) 2892 return gen6_rps_irq_handler(i915, iir); 2893 2894 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 2895 instance, iir); 2896 } 2897 2898 static void 2899 gen11_engine_irq_handler(struct drm_i915_private * const i915, 2900 const u8 class, const u8 instance, const u16 iir) 2901 { 2902 struct intel_engine_cs *engine; 2903 2904 if (instance <= MAX_ENGINE_INSTANCE) 2905 engine = i915->engine_class[class][instance]; 2906 else 2907 engine = NULL; 2908 2909 if (likely(engine)) 2910 return gen8_cs_irq_handler(engine, iir); 2911 2912 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 2913 class, instance); 2914 } 2915 2916 static void 2917 gen11_gt_identity_handler(struct drm_i915_private * const i915, 2918 const u32 identity) 2919 { 2920 const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 2921 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 2922 const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 2923 2924 if (unlikely(!intr)) 2925 return; 2926 2927 if (class <= COPY_ENGINE_CLASS) 2928 return gen11_engine_irq_handler(i915, class, instance, intr); 2929 2930 if (class == OTHER_CLASS) 2931 return gen11_other_irq_handler(i915, instance, intr); 2932 2933 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 2934 class, instance, intr); 2935 } 2936 2937 static void 2938 gen11_gt_bank_handler(struct drm_i915_private * const i915, 2939 const unsigned int bank) 2940 { 2941 void __iomem * const regs = i915->regs; 2942 unsigned long intr_dw; 2943 unsigned int bit; 2944 2945 lockdep_assert_held(&i915->irq_lock); 2946 2947 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 2948 2949 if (unlikely(!intr_dw)) { 2950 DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 2951 return; 2952 } 2953 2954 for_each_set_bit(bit, &intr_dw, 32) { 2955 const u32 ident = gen11_gt_engine_identity(i915, 2956 bank, bit); 2957 2958 gen11_gt_identity_handler(i915, ident); 2959 } 2960 2961 /* Clear must be after shared has been served for engine */ 2962 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 2963 } 2964 2965 static void 2966 gen11_gt_irq_handler(struct drm_i915_private * const i915, 2967 const u32 master_ctl) 2968 { 2969 unsigned int bank; 2970 2971 spin_lock(&i915->irq_lock); 2972 2973 for (bank = 0; bank < 2; bank++) { 2974 if (master_ctl & GEN11_GT_DW_IRQ(bank)) 2975 gen11_gt_bank_handler(i915, bank); 2976 } 2977 2978 spin_unlock(&i915->irq_lock); 2979 } 2980 2981 static irqreturn_t gen11_irq_handler(int irq, void *arg) 2982 { 2983 struct drm_i915_private * const i915 = to_i915(arg); 2984 void __iomem * const regs = i915->regs; 2985 u32 master_ctl; 2986 2987 if (!intel_irqs_enabled(i915)) 2988 return IRQ_NONE; 2989 2990 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2991 master_ctl &= ~GEN11_MASTER_IRQ; 2992 if (!master_ctl) 2993 return IRQ_NONE; 2994 2995 /* 
Disable interrupts. */ 2996 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 2997 2998 /* Find, clear, then process each source of interrupt. */ 2999 gen11_gt_irq_handler(i915, master_ctl); 3000 3001 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3002 if (master_ctl & GEN11_DISPLAY_IRQ) { 3003 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 3004 3005 disable_rpm_wakeref_asserts(i915); 3006 /* 3007 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 3008 * for the display related bits. 3009 */ 3010 gen8_de_irq_handler(i915, disp_ctl); 3011 enable_rpm_wakeref_asserts(i915); 3012 } 3013 3014 /* Acknowledge and enable interrupts. */ 3015 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); 3016 3017 return IRQ_HANDLED; 3018 } 3019 3020 static void i915_reset_device(struct drm_i915_private *dev_priv, 3021 u32 engine_mask, 3022 const char *reason) 3023 { 3024 struct i915_gpu_error *error = &dev_priv->gpu_error; 3025 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 3026 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 3027 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 3028 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 3029 struct wedge_me w; 3030 3031 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 3032 3033 DRM_DEBUG_DRIVER("resetting chip\n"); 3034 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 3035 3036 /* Use a watchdog to ensure that our reset completes */ 3037 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 3038 intel_prepare_reset(dev_priv); 3039 3040 error->reason = reason; 3041 error->stalled_mask = engine_mask; 3042 3043 /* Signal that locked waiters should reset the GPU */ 3044 smp_mb__before_atomic(); 3045 set_bit(I915_RESET_HANDOFF, &error->flags); 3046 wake_up_all(&error->wait_queue); 3047 3048 /* Wait for anyone holding the lock to wakeup, without 3049 * blocking indefinitely on struct_mutex. 3050 */ 3051 do { 3052 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 3053 i915_reset(dev_priv, engine_mask, reason); 3054 mutex_unlock(&dev_priv->drm.struct_mutex); 3055 } 3056 } while (wait_on_bit_timeout(&error->flags, 3057 I915_RESET_HANDOFF, 3058 TASK_UNINTERRUPTIBLE, 3059 1)); 3060 3061 error->stalled_mask = 0; 3062 error->reason = NULL; 3063 3064 intel_finish_reset(dev_priv); 3065 } 3066 3067 if (!test_bit(I915_WEDGED, &error->flags)) 3068 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); 3069 } 3070 3071 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 3072 { 3073 u32 eir; 3074 3075 if (!IS_GEN2(dev_priv)) 3076 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 3077 3078 if (INTEL_GEN(dev_priv) < 4) 3079 I915_WRITE(IPEIR, I915_READ(IPEIR)); 3080 else 3081 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 3082 3083 I915_WRITE(EIR, I915_READ(EIR)); 3084 eir = I915_READ(EIR); 3085 if (eir) { 3086 /* 3087 * some errors might have become stuck, 3088 * mask them. 3089 */ 3090 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 3091 I915_WRITE(EMR, I915_READ(EMR) | eir); 3092 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3093 } 3094 } 3095 3096 /** 3097 * i915_handle_error - handle a gpu error 3098 * @dev_priv: i915 device private 3099 * @engine_mask: mask representing engines that are hung 3100 * @flags: control flags 3101 * @fmt: Error message format string 3102 * 3103 * Do some basic checking of register state at error time and 3104 * dump it to the syslog. 
Also call i915_capture_error_state() to make 3105 * sure we get a record and make it available in debugfs. Fire a uevent 3106 * so userspace knows something bad happened (should trigger collection 3107 * of a ring dump etc.). 3108 */ 3109 void i915_handle_error(struct drm_i915_private *dev_priv, 3110 u32 engine_mask, 3111 unsigned long flags, 3112 const char *fmt, ...) 3113 { 3114 struct intel_engine_cs *engine; 3115 unsigned int tmp; 3116 char error_msg[80]; 3117 char *msg = NULL; 3118 3119 if (fmt) { 3120 va_list args; 3121 3122 va_start(args, fmt); 3123 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 3124 va_end(args); 3125 3126 msg = error_msg; 3127 } 3128 3129 /* 3130 * In most cases it's guaranteed that we get here with an RPM 3131 * reference held, for example because there is a pending GPU 3132 * request that won't finish until the reset is done. This 3133 * isn't the case at least when we get here by doing a 3134 * simulated reset via debugfs, so get an RPM reference. 3135 */ 3136 intel_runtime_pm_get(dev_priv); 3137 3138 engine_mask &= INTEL_INFO(dev_priv)->ring_mask; 3139 3140 if (flags & I915_ERROR_CAPTURE) { 3141 i915_capture_error_state(dev_priv, engine_mask, msg); 3142 i915_clear_error_registers(dev_priv); 3143 } 3144 3145 /* 3146 * Try engine reset when available. We fall back to full reset if 3147 * single reset fails. 3148 */ 3149 if (intel_has_reset_engine(dev_priv)) { 3150 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 3151 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 3152 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3153 &dev_priv->gpu_error.flags)) 3154 continue; 3155 3156 if (i915_reset_engine(engine, msg) == 0) 3157 engine_mask &= ~intel_engine_flag(engine); 3158 3159 clear_bit(I915_RESET_ENGINE + engine->id, 3160 &dev_priv->gpu_error.flags); 3161 wake_up_bit(&dev_priv->gpu_error.flags, 3162 I915_RESET_ENGINE + engine->id); 3163 } 3164 } 3165 3166 if (!engine_mask) 3167 goto out; 3168 3169 /* Full reset needs the mutex, stop any other user trying to do so. */ 3170 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 3171 wait_event(dev_priv->gpu_error.reset_queue, 3172 !test_bit(I915_RESET_BACKOFF, 3173 &dev_priv->gpu_error.flags)); 3174 goto out; 3175 } 3176 3177 /* Prevent any other reset-engine attempt. 
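 * Each engine's I915_RESET_ENGINE bit is taken here and only released again
 * after the full device reset below has completed.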
*/ 3178 for_each_engine(engine, dev_priv, tmp) { 3179 while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3180 &dev_priv->gpu_error.flags)) 3181 wait_on_bit(&dev_priv->gpu_error.flags, 3182 I915_RESET_ENGINE + engine->id, 3183 TASK_UNINTERRUPTIBLE); 3184 } 3185 3186 i915_reset_device(dev_priv, engine_mask, msg); 3187 3188 for_each_engine(engine, dev_priv, tmp) { 3189 clear_bit(I915_RESET_ENGINE + engine->id, 3190 &dev_priv->gpu_error.flags); 3191 } 3192 3193 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 3194 wake_up_all(&dev_priv->gpu_error.reset_queue); 3195 3196 out: 3197 intel_runtime_pm_put(dev_priv); 3198 } 3199 3200 /* Called from drm generic code, passed 'crtc' which 3201 * we use as a pipe index 3202 */ 3203 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 3204 { 3205 struct drm_i915_private *dev_priv = to_i915(dev); 3206 unsigned long irqflags; 3207 3208 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3209 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3210 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3211 3212 return 0; 3213 } 3214 3215 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 3216 { 3217 struct drm_i915_private *dev_priv = to_i915(dev); 3218 unsigned long irqflags; 3219 3220 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3221 i915_enable_pipestat(dev_priv, pipe, 3222 PIPE_START_VBLANK_INTERRUPT_STATUS); 3223 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3224 3225 return 0; 3226 } 3227 3228 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3229 { 3230 struct drm_i915_private *dev_priv = to_i915(dev); 3231 unsigned long irqflags; 3232 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3233 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3234 3235 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3236 ilk_enable_display_irq(dev_priv, bit); 3237 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3238 3239 /* Even though there is no DMC, frame counter can get stuck when 3240 * PSR is active as no frames are generated. 3241 */ 3242 if (HAS_PSR(dev_priv)) 3243 drm_vblank_restore(dev, pipe); 3244 3245 return 0; 3246 } 3247 3248 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3249 { 3250 struct drm_i915_private *dev_priv = to_i915(dev); 3251 unsigned long irqflags; 3252 3253 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3254 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3255 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3256 3257 /* Even if there is no DMC, frame counter can get stuck when 3258 * PSR is active as no frames are generated, so check only for PSR. 
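 * (drm_vblank_restore() estimates the vblanks missed while the counter was
 * stuck so the DRM vblank count stays consistent.)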
3259 */ 3260 if (HAS_PSR(dev_priv)) 3261 drm_vblank_restore(dev, pipe); 3262 3263 return 0; 3264 } 3265 3266 /* Called from drm generic code, passed 'crtc' which 3267 * we use as a pipe index 3268 */ 3269 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 3270 { 3271 struct drm_i915_private *dev_priv = to_i915(dev); 3272 unsigned long irqflags; 3273 3274 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3275 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3276 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3277 } 3278 3279 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 3280 { 3281 struct drm_i915_private *dev_priv = to_i915(dev); 3282 unsigned long irqflags; 3283 3284 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3285 i915_disable_pipestat(dev_priv, pipe, 3286 PIPE_START_VBLANK_INTERRUPT_STATUS); 3287 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3288 } 3289 3290 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3291 { 3292 struct drm_i915_private *dev_priv = to_i915(dev); 3293 unsigned long irqflags; 3294 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3295 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3296 3297 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3298 ilk_disable_display_irq(dev_priv, bit); 3299 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3300 } 3301 3302 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3303 { 3304 struct drm_i915_private *dev_priv = to_i915(dev); 3305 unsigned long irqflags; 3306 3307 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3308 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3309 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3310 } 3311 3312 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3313 { 3314 if (HAS_PCH_NOP(dev_priv)) 3315 return; 3316 3317 GEN3_IRQ_RESET(SDE); 3318 3319 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3320 I915_WRITE(SERR_INT, 0xffffffff); 3321 } 3322 3323 /* 3324 * SDEIER is also touched by the interrupt handler to work around missed PCH 3325 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3326 * instead we unconditionally enable all PCH interrupt sources here, but then 3327 * only unmask them as needed with SDEIMR. 3328 * 3329 * This function needs to be called before interrupts are enabled. 
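 * (See ironlake_irq_handler(), which temporarily clears SDEIER around its
 * single SDEIIR write and restores it afterwards to kick any queued PCH
 * interrupt.)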
3330 */ 3331 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3332 { 3333 struct drm_i915_private *dev_priv = to_i915(dev); 3334 3335 if (HAS_PCH_NOP(dev_priv)) 3336 return; 3337 3338 WARN_ON(I915_READ(SDEIER) != 0); 3339 I915_WRITE(SDEIER, 0xffffffff); 3340 POSTING_READ(SDEIER); 3341 } 3342 3343 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3344 { 3345 GEN3_IRQ_RESET(GT); 3346 if (INTEL_GEN(dev_priv) >= 6) 3347 GEN3_IRQ_RESET(GEN6_PM); 3348 } 3349 3350 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3351 { 3352 if (IS_CHERRYVIEW(dev_priv)) 3353 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3354 else 3355 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3356 3357 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3358 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3359 3360 i9xx_pipestat_irq_reset(dev_priv); 3361 3362 GEN3_IRQ_RESET(VLV_); 3363 dev_priv->irq_mask = ~0u; 3364 } 3365 3366 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3367 { 3368 u32 pipestat_mask; 3369 u32 enable_mask; 3370 enum pipe pipe; 3371 3372 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3373 3374 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3375 for_each_pipe(dev_priv, pipe) 3376 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3377 3378 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3379 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3380 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3381 I915_LPE_PIPE_A_INTERRUPT | 3382 I915_LPE_PIPE_B_INTERRUPT; 3383 3384 if (IS_CHERRYVIEW(dev_priv)) 3385 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3386 I915_LPE_PIPE_C_INTERRUPT; 3387 3388 WARN_ON(dev_priv->irq_mask != ~0u); 3389 3390 dev_priv->irq_mask = ~enable_mask; 3391 3392 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3393 } 3394 3395 /* drm_dma.h hooks 3396 */ 3397 static void ironlake_irq_reset(struct drm_device *dev) 3398 { 3399 struct drm_i915_private *dev_priv = to_i915(dev); 3400 3401 if (IS_GEN5(dev_priv)) 3402 I915_WRITE(HWSTAM, 0xffffffff); 3403 3404 GEN3_IRQ_RESET(DE); 3405 if (IS_GEN7(dev_priv)) 3406 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3407 3408 if (IS_HASWELL(dev_priv)) { 3409 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3410 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3411 } 3412 3413 gen5_gt_irq_reset(dev_priv); 3414 3415 ibx_irq_reset(dev_priv); 3416 } 3417 3418 static void valleyview_irq_reset(struct drm_device *dev) 3419 { 3420 struct drm_i915_private *dev_priv = to_i915(dev); 3421 3422 I915_WRITE(VLV_MASTER_IER, 0); 3423 POSTING_READ(VLV_MASTER_IER); 3424 3425 gen5_gt_irq_reset(dev_priv); 3426 3427 spin_lock_irq(&dev_priv->irq_lock); 3428 if (dev_priv->display_irqs_enabled) 3429 vlv_display_irq_reset(dev_priv); 3430 spin_unlock_irq(&dev_priv->irq_lock); 3431 } 3432 3433 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3434 { 3435 GEN8_IRQ_RESET_NDX(GT, 0); 3436 GEN8_IRQ_RESET_NDX(GT, 1); 3437 GEN8_IRQ_RESET_NDX(GT, 2); 3438 GEN8_IRQ_RESET_NDX(GT, 3); 3439 } 3440 3441 static void gen8_irq_reset(struct drm_device *dev) 3442 { 3443 struct drm_i915_private *dev_priv = to_i915(dev); 3444 int pipe; 3445 3446 I915_WRITE(GEN8_MASTER_IRQ, 0); 3447 POSTING_READ(GEN8_MASTER_IRQ); 3448 3449 gen8_gt_irq_reset(dev_priv); 3450 3451 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3452 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3453 3454 for_each_pipe(dev_priv, pipe) 3455 if (intel_display_power_is_enabled(dev_priv, 3456 POWER_DOMAIN_PIPE(pipe))) 3457 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3458 3459 
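/*
 * Pipes whose power well is down have lost their IRQ state anyway; they get
 * re-initialized via gen8_irq_power_well_post_enable() when the well is
 * powered up again.
 */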
GEN3_IRQ_RESET(GEN8_DE_PORT_); 3460 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3461 GEN3_IRQ_RESET(GEN8_PCU_); 3462 3463 if (HAS_PCH_SPLIT(dev_priv)) 3464 ibx_irq_reset(dev_priv); 3465 } 3466 3467 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 3468 { 3469 /* Disable RCS, BCS, VCS and VECS class engines. */ 3470 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 3471 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 3472 3473 /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 3474 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 3475 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 3476 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 3477 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 3478 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3479 3480 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3481 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 3482 } 3483 3484 static void gen11_irq_reset(struct drm_device *dev) 3485 { 3486 struct drm_i915_private *dev_priv = dev->dev_private; 3487 int pipe; 3488 3489 I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); 3490 POSTING_READ(GEN11_GFX_MSTR_IRQ); 3491 3492 gen11_gt_irq_reset(dev_priv); 3493 3494 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3495 3496 for_each_pipe(dev_priv, pipe) 3497 if (intel_display_power_is_enabled(dev_priv, 3498 POWER_DOMAIN_PIPE(pipe))) 3499 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3500 3501 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3502 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3503 GEN3_IRQ_RESET(GEN8_PCU_); 3504 } 3505 3506 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3507 u8 pipe_mask) 3508 { 3509 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3510 enum pipe pipe; 3511 3512 spin_lock_irq(&dev_priv->irq_lock); 3513 3514 if (!intel_irqs_enabled(dev_priv)) { 3515 spin_unlock_irq(&dev_priv->irq_lock); 3516 return; 3517 } 3518 3519 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3520 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3521 dev_priv->de_irq_mask[pipe], 3522 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3523 3524 spin_unlock_irq(&dev_priv->irq_lock); 3525 } 3526 3527 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3528 u8 pipe_mask) 3529 { 3530 enum pipe pipe; 3531 3532 spin_lock_irq(&dev_priv->irq_lock); 3533 3534 if (!intel_irqs_enabled(dev_priv)) { 3535 spin_unlock_irq(&dev_priv->irq_lock); 3536 return; 3537 } 3538 3539 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3540 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3541 3542 spin_unlock_irq(&dev_priv->irq_lock); 3543 3544 /* make sure we're done processing display irqs */ 3545 synchronize_irq(dev_priv->drm.irq); 3546 } 3547 3548 static void cherryview_irq_reset(struct drm_device *dev) 3549 { 3550 struct drm_i915_private *dev_priv = to_i915(dev); 3551 3552 I915_WRITE(GEN8_MASTER_IRQ, 0); 3553 POSTING_READ(GEN8_MASTER_IRQ); 3554 3555 gen8_gt_irq_reset(dev_priv); 3556 3557 GEN3_IRQ_RESET(GEN8_PCU_); 3558 3559 spin_lock_irq(&dev_priv->irq_lock); 3560 if (dev_priv->display_irqs_enabled) 3561 vlv_display_irq_reset(dev_priv); 3562 spin_unlock_irq(&dev_priv->irq_lock); 3563 } 3564 3565 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3566 const u32 hpd[HPD_NUM_PINS]) 3567 { 3568 struct intel_encoder *encoder; 3569 u32 enabled_irqs = 0; 3570 3571 for_each_intel_encoder(&dev_priv->drm, encoder) 3572 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3573 enabled_irqs |= hpd[encoder->hpd_pin]; 3574 3575 return enabled_irqs; 3576 } 3577 3578 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3579 { 3580 u32 hotplug; 3581 3582 /* 3583 * Enable 
digital hotplug on the PCH, and configure the DP short pulse 3584 * duration to 2ms (which is the minimum in the Display Port spec). 3585 * The pulse duration bits are reserved on LPT+. 3586 */ 3587 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3588 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3589 PORTC_PULSE_DURATION_MASK | 3590 PORTD_PULSE_DURATION_MASK); 3591 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3592 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3593 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3594 /* 3595 * When CPU and PCH are on the same package, port A 3596 * HPD must be enabled in both north and south. 3597 */ 3598 if (HAS_PCH_LPT_LP(dev_priv)) 3599 hotplug |= PORTA_HOTPLUG_ENABLE; 3600 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3601 } 3602 3603 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3604 { 3605 u32 hotplug_irqs, enabled_irqs; 3606 3607 if (HAS_PCH_IBX(dev_priv)) { 3608 hotplug_irqs = SDE_HOTPLUG_MASK; 3609 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3610 } else { 3611 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3612 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3613 } 3614 3615 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3616 3617 ibx_hpd_detection_setup(dev_priv); 3618 } 3619 3620 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3621 { 3622 u32 val, hotplug; 3623 3624 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3625 if (HAS_PCH_CNP(dev_priv)) { 3626 val = I915_READ(SOUTH_CHICKEN1); 3627 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3628 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3629 I915_WRITE(SOUTH_CHICKEN1, val); 3630 } 3631 3632 /* Enable digital hotplug on the PCH */ 3633 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3634 hotplug |= PORTA_HOTPLUG_ENABLE | 3635 PORTB_HOTPLUG_ENABLE | 3636 PORTC_HOTPLUG_ENABLE | 3637 PORTD_HOTPLUG_ENABLE; 3638 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3639 3640 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3641 hotplug |= PORTE_HOTPLUG_ENABLE; 3642 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3643 } 3644 3645 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3646 { 3647 u32 hotplug_irqs, enabled_irqs; 3648 3649 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3650 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3651 3652 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3653 3654 spt_hpd_detection_setup(dev_priv); 3655 } 3656 3657 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3658 { 3659 u32 hotplug; 3660 3661 /* 3662 * Enable digital hotplug on the CPU, and configure the DP short pulse 3663 * duration to 2ms (which is the minimum in the Display Port spec) 3664 * The pulse duration bits are reserved on HSW+. 
3665 */ 3666 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3667 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3668 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3669 DIGITAL_PORTA_PULSE_DURATION_2ms; 3670 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3671 } 3672 3673 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3674 { 3675 u32 hotplug_irqs, enabled_irqs; 3676 3677 if (INTEL_GEN(dev_priv) >= 8) { 3678 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3679 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3680 3681 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3682 } else if (INTEL_GEN(dev_priv) >= 7) { 3683 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3684 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3685 3686 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3687 } else { 3688 hotplug_irqs = DE_DP_A_HOTPLUG; 3689 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3690 3691 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3692 } 3693 3694 ilk_hpd_detection_setup(dev_priv); 3695 3696 ibx_hpd_irq_setup(dev_priv); 3697 } 3698 3699 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3700 u32 enabled_irqs) 3701 { 3702 u32 hotplug; 3703 3704 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3705 hotplug |= PORTA_HOTPLUG_ENABLE | 3706 PORTB_HOTPLUG_ENABLE | 3707 PORTC_HOTPLUG_ENABLE; 3708 3709 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3710 hotplug, enabled_irqs); 3711 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3712 3713 /* 3714 * For BXT invert bit has to be set based on AOB design 3715 * for HPD detection logic, update it based on VBT fields. 3716 */ 3717 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3718 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3719 hotplug |= BXT_DDIA_HPD_INVERT; 3720 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3721 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3722 hotplug |= BXT_DDIB_HPD_INVERT; 3723 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3724 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3725 hotplug |= BXT_DDIC_HPD_INVERT; 3726 3727 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3728 } 3729 3730 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3731 { 3732 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3733 } 3734 3735 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3736 { 3737 u32 hotplug_irqs, enabled_irqs; 3738 3739 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3740 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3741 3742 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3743 3744 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3745 } 3746 3747 static void ibx_irq_postinstall(struct drm_device *dev) 3748 { 3749 struct drm_i915_private *dev_priv = to_i915(dev); 3750 u32 mask; 3751 3752 if (HAS_PCH_NOP(dev_priv)) 3753 return; 3754 3755 if (HAS_PCH_IBX(dev_priv)) 3756 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3757 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3758 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3759 else 3760 mask = SDE_GMBUS_CPT; 3761 3762 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 3763 I915_WRITE(SDEIMR, ~mask); 3764 3765 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3766 HAS_PCH_LPT(dev_priv)) 3767 ibx_hpd_detection_setup(dev_priv); 3768 else 3769 spt_hpd_detection_setup(dev_priv); 3770 } 3771 3772 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3773 { 3774 struct drm_i915_private *dev_priv = to_i915(dev); 3775 u32 pm_irqs, gt_irqs; 3776 3777 pm_irqs = 
gt_irqs = 0; 3778 3779 dev_priv->gt_irq_mask = ~0; 3780 if (HAS_L3_DPF(dev_priv)) { 3781 /* L3 parity interrupt is always unmasked. */ 3782 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3783 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3784 } 3785 3786 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3787 if (IS_GEN5(dev_priv)) { 3788 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3789 } else { 3790 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3791 } 3792 3793 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3794 3795 if (INTEL_GEN(dev_priv) >= 6) { 3796 /* 3797 * RPS interrupts will get enabled/disabled on demand when RPS 3798 * itself is enabled/disabled. 3799 */ 3800 if (HAS_VEBOX(dev_priv)) { 3801 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3802 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3803 } 3804 3805 dev_priv->pm_imr = 0xffffffff; 3806 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3807 } 3808 } 3809 3810 static int ironlake_irq_postinstall(struct drm_device *dev) 3811 { 3812 struct drm_i915_private *dev_priv = to_i915(dev); 3813 u32 display_mask, extra_mask; 3814 3815 if (INTEL_GEN(dev_priv) >= 7) { 3816 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3817 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3818 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3819 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3820 DE_DP_A_HOTPLUG_IVB); 3821 } else { 3822 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3823 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3824 DE_PIPEA_CRC_DONE | DE_POISON); 3825 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3826 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3827 DE_DP_A_HOTPLUG); 3828 } 3829 3830 if (IS_HASWELL(dev_priv)) { 3831 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 3832 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 3833 display_mask |= DE_EDP_PSR_INT_HSW; 3834 } 3835 3836 dev_priv->irq_mask = ~display_mask; 3837 3838 ibx_irq_pre_postinstall(dev); 3839 3840 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3841 3842 gen5_gt_irq_postinstall(dev); 3843 3844 ilk_hpd_detection_setup(dev_priv); 3845 3846 ibx_irq_postinstall(dev); 3847 3848 if (IS_IRONLAKE_M(dev_priv)) { 3849 /* Enable PCU event interrupts 3850 * 3851 * spinlocking not required here for correctness since interrupt 3852 * setup is guaranteed to run in single-threaded context. But we 3853 * need it to make the assert_spin_locked happy. 
*/
3854 spin_lock_irq(&dev_priv->irq_lock);
3855 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3856 spin_unlock_irq(&dev_priv->irq_lock);
3857 }
3858
3859 return 0;
3860 }
3861
3862 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3863 {
3864 lockdep_assert_held(&dev_priv->irq_lock);
3865
3866 if (dev_priv->display_irqs_enabled)
3867 return;
3868
3869 dev_priv->display_irqs_enabled = true;
3870
3871 if (intel_irqs_enabled(dev_priv)) {
3872 vlv_display_irq_reset(dev_priv);
3873 vlv_display_irq_postinstall(dev_priv);
3874 }
3875 }
3876
3877 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3878 {
3879 lockdep_assert_held(&dev_priv->irq_lock);
3880
3881 if (!dev_priv->display_irqs_enabled)
3882 return;
3883
3884 dev_priv->display_irqs_enabled = false;
3885
3886 if (intel_irqs_enabled(dev_priv))
3887 vlv_display_irq_reset(dev_priv);
3888 }
3889
3890
3891 static int valleyview_irq_postinstall(struct drm_device *dev)
3892 {
3893 struct drm_i915_private *dev_priv = to_i915(dev);
3894
3895 gen5_gt_irq_postinstall(dev);
3896
3897 spin_lock_irq(&dev_priv->irq_lock);
3898 if (dev_priv->display_irqs_enabled)
3899 vlv_display_irq_postinstall(dev_priv);
3900 spin_unlock_irq(&dev_priv->irq_lock);
3901
3902 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3903 POSTING_READ(VLV_MASTER_IER);
3904
3905 return 0;
3906 }
3907
3908 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3909 {
3910 /* These are interrupts we'll toggle with the ring mask register */
3911 uint32_t gt_interrupts[] = {
3912 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3913 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3914 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3915 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3916 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3917 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3918 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3919 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3920 0,
3921 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3922 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3923 };
3924
3925 if (HAS_L3_DPF(dev_priv))
3926 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3927
3928 dev_priv->pm_ier = 0x0;
3929 dev_priv->pm_imr = ~dev_priv->pm_ier;
3930 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3931 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3932 /*
3933 * RPS interrupts will get enabled/disabled on demand when RPS itself
3934 * is enabled/disabled. Same will be the case for GuC interrupts.
3935 */ 3936 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 3937 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3938 } 3939 3940 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3941 { 3942 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3943 uint32_t de_pipe_enables; 3944 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3945 u32 de_port_enables; 3946 u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR; 3947 enum pipe pipe; 3948 3949 if (INTEL_GEN(dev_priv) >= 9) { 3950 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3951 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3952 GEN9_AUX_CHANNEL_D; 3953 if (IS_GEN9_LP(dev_priv)) 3954 de_port_masked |= BXT_DE_PORT_GMBUS; 3955 } else { 3956 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3957 } 3958 3959 if (IS_CNL_WITH_PORT_F(dev_priv)) 3960 de_port_masked |= CNL_AUX_CHANNEL_F; 3961 3962 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3963 GEN8_PIPE_FIFO_UNDERRUN; 3964 3965 de_port_enables = de_port_masked; 3966 if (IS_GEN9_LP(dev_priv)) 3967 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3968 else if (IS_BROADWELL(dev_priv)) 3969 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3970 3971 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 3972 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 3973 3974 for_each_pipe(dev_priv, pipe) { 3975 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 3976 3977 if (intel_display_power_is_enabled(dev_priv, 3978 POWER_DOMAIN_PIPE(pipe))) 3979 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3980 dev_priv->de_irq_mask[pipe], 3981 de_pipe_enables); 3982 } 3983 3984 GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3985 GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3986 3987 if (IS_GEN9_LP(dev_priv)) 3988 bxt_hpd_detection_setup(dev_priv); 3989 else if (IS_BROADWELL(dev_priv)) 3990 ilk_hpd_detection_setup(dev_priv); 3991 } 3992 3993 static int gen8_irq_postinstall(struct drm_device *dev) 3994 { 3995 struct drm_i915_private *dev_priv = to_i915(dev); 3996 3997 if (HAS_PCH_SPLIT(dev_priv)) 3998 ibx_irq_pre_postinstall(dev); 3999 4000 gen8_gt_irq_postinstall(dev_priv); 4001 gen8_de_irq_postinstall(dev_priv); 4002 4003 if (HAS_PCH_SPLIT(dev_priv)) 4004 ibx_irq_postinstall(dev); 4005 4006 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4007 POSTING_READ(GEN8_MASTER_IRQ); 4008 4009 return 0; 4010 } 4011 4012 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4013 { 4014 const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 4015 4016 BUILD_BUG_ON(irqs & 0xffff0000); 4017 4018 /* Enable RCS, BCS, VCS and VECS class interrupts. */ 4019 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); 4020 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); 4021 4022 /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ 4023 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); 4024 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); 4025 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); 4026 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); 4027 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); 4028 4029 /* 4030 * RPS interrupts will get enabled/disabled on demand when RPS itself 4031 * is enabled/disabled. 
4032 */ 4033 dev_priv->pm_ier = 0x0; 4034 dev_priv->pm_imr = ~dev_priv->pm_ier; 4035 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 4036 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 4037 } 4038 4039 static int gen11_irq_postinstall(struct drm_device *dev) 4040 { 4041 struct drm_i915_private *dev_priv = dev->dev_private; 4042 4043 gen11_gt_irq_postinstall(dev_priv); 4044 gen8_de_irq_postinstall(dev_priv); 4045 4046 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 4047 4048 I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 4049 POSTING_READ(GEN11_GFX_MSTR_IRQ); 4050 4051 return 0; 4052 } 4053 4054 static int cherryview_irq_postinstall(struct drm_device *dev) 4055 { 4056 struct drm_i915_private *dev_priv = to_i915(dev); 4057 4058 gen8_gt_irq_postinstall(dev_priv); 4059 4060 spin_lock_irq(&dev_priv->irq_lock); 4061 if (dev_priv->display_irqs_enabled) 4062 vlv_display_irq_postinstall(dev_priv); 4063 spin_unlock_irq(&dev_priv->irq_lock); 4064 4065 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4066 POSTING_READ(GEN8_MASTER_IRQ); 4067 4068 return 0; 4069 } 4070 4071 static void i8xx_irq_reset(struct drm_device *dev) 4072 { 4073 struct drm_i915_private *dev_priv = to_i915(dev); 4074 4075 i9xx_pipestat_irq_reset(dev_priv); 4076 4077 I915_WRITE16(HWSTAM, 0xffff); 4078 4079 GEN2_IRQ_RESET(); 4080 } 4081 4082 static int i8xx_irq_postinstall(struct drm_device *dev) 4083 { 4084 struct drm_i915_private *dev_priv = to_i915(dev); 4085 u16 enable_mask; 4086 4087 I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 4088 I915_ERROR_MEMORY_REFRESH)); 4089 4090 /* Unmask the interrupts that we always want on. */ 4091 dev_priv->irq_mask = 4092 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4093 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 4094 4095 enable_mask = 4096 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4097 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4098 I915_USER_INTERRUPT; 4099 4100 GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4101 4102 /* Interrupt setup is already guaranteed to be single-threaded, this is 4103 * just to make the assert_spin_locked check happy. 
*/ 4104 spin_lock_irq(&dev_priv->irq_lock); 4105 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4106 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4107 spin_unlock_irq(&dev_priv->irq_lock); 4108 4109 return 0; 4110 } 4111 4112 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4113 { 4114 struct drm_device *dev = arg; 4115 struct drm_i915_private *dev_priv = to_i915(dev); 4116 irqreturn_t ret = IRQ_NONE; 4117 4118 if (!intel_irqs_enabled(dev_priv)) 4119 return IRQ_NONE; 4120 4121 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4122 disable_rpm_wakeref_asserts(dev_priv); 4123 4124 do { 4125 u32 pipe_stats[I915_MAX_PIPES] = {}; 4126 u16 iir; 4127 4128 iir = I915_READ16(IIR); 4129 if (iir == 0) 4130 break; 4131 4132 ret = IRQ_HANDLED; 4133 4134 /* Call regardless, as some status bits might not be 4135 * signalled in iir */ 4136 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4137 4138 I915_WRITE16(IIR, iir); 4139 4140 if (iir & I915_USER_INTERRUPT) 4141 notify_ring(dev_priv->engine[RCS]); 4142 4143 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4144 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4145 4146 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4147 } while (0); 4148 4149 enable_rpm_wakeref_asserts(dev_priv); 4150 4151 return ret; 4152 } 4153 4154 static void i915_irq_reset(struct drm_device *dev) 4155 { 4156 struct drm_i915_private *dev_priv = to_i915(dev); 4157 4158 if (I915_HAS_HOTPLUG(dev_priv)) { 4159 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4160 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4161 } 4162 4163 i9xx_pipestat_irq_reset(dev_priv); 4164 4165 I915_WRITE(HWSTAM, 0xffffffff); 4166 4167 GEN3_IRQ_RESET(); 4168 } 4169 4170 static int i915_irq_postinstall(struct drm_device *dev) 4171 { 4172 struct drm_i915_private *dev_priv = to_i915(dev); 4173 u32 enable_mask; 4174 4175 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 4176 I915_ERROR_MEMORY_REFRESH)); 4177 4178 /* Unmask the interrupts that we always want on. */ 4179 dev_priv->irq_mask = 4180 ~(I915_ASLE_INTERRUPT | 4181 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4182 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 4183 4184 enable_mask = 4185 I915_ASLE_INTERRUPT | 4186 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4187 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4188 I915_USER_INTERRUPT; 4189 4190 if (I915_HAS_HOTPLUG(dev_priv)) { 4191 /* Enable in IER... */ 4192 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4193 /* and unmask in IMR */ 4194 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4195 } 4196 4197 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4198 4199 /* Interrupt setup is already guaranteed to be single-threaded, this is 4200 * just to make the assert_spin_locked check happy. 
*/ 4201 spin_lock_irq(&dev_priv->irq_lock); 4202 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4203 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4204 spin_unlock_irq(&dev_priv->irq_lock); 4205 4206 i915_enable_asle_pipestat(dev_priv); 4207 4208 return 0; 4209 } 4210 4211 static irqreturn_t i915_irq_handler(int irq, void *arg) 4212 { 4213 struct drm_device *dev = arg; 4214 struct drm_i915_private *dev_priv = to_i915(dev); 4215 irqreturn_t ret = IRQ_NONE; 4216 4217 if (!intel_irqs_enabled(dev_priv)) 4218 return IRQ_NONE; 4219 4220 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4221 disable_rpm_wakeref_asserts(dev_priv); 4222 4223 do { 4224 u32 pipe_stats[I915_MAX_PIPES] = {}; 4225 u32 hotplug_status = 0; 4226 u32 iir; 4227 4228 iir = I915_READ(IIR); 4229 if (iir == 0) 4230 break; 4231 4232 ret = IRQ_HANDLED; 4233 4234 if (I915_HAS_HOTPLUG(dev_priv) && 4235 iir & I915_DISPLAY_PORT_INTERRUPT) 4236 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4237 4238 /* Call regardless, as some status bits might not be 4239 * signalled in iir */ 4240 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4241 4242 I915_WRITE(IIR, iir); 4243 4244 if (iir & I915_USER_INTERRUPT) 4245 notify_ring(dev_priv->engine[RCS]); 4246 4247 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4248 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4249 4250 if (hotplug_status) 4251 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4252 4253 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4254 } while (0); 4255 4256 enable_rpm_wakeref_asserts(dev_priv); 4257 4258 return ret; 4259 } 4260 4261 static void i965_irq_reset(struct drm_device *dev) 4262 { 4263 struct drm_i915_private *dev_priv = to_i915(dev); 4264 4265 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4266 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4267 4268 i9xx_pipestat_irq_reset(dev_priv); 4269 4270 I915_WRITE(HWSTAM, 0xffffffff); 4271 4272 GEN3_IRQ_RESET(); 4273 } 4274 4275 static int i965_irq_postinstall(struct drm_device *dev) 4276 { 4277 struct drm_i915_private *dev_priv = to_i915(dev); 4278 u32 enable_mask; 4279 u32 error_mask; 4280 4281 /* 4282 * Enable some error detection, note the instruction error mask 4283 * bit is reserved, so we leave it masked. 4284 */ 4285 if (IS_G4X(dev_priv)) { 4286 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4287 GM45_ERROR_MEM_PRIV | 4288 GM45_ERROR_CP_PRIV | 4289 I915_ERROR_MEMORY_REFRESH); 4290 } else { 4291 error_mask = ~(I915_ERROR_PAGE_TABLE | 4292 I915_ERROR_MEMORY_REFRESH); 4293 } 4294 I915_WRITE(EMR, error_mask); 4295 4296 /* Unmask the interrupts that we always want on. */ 4297 dev_priv->irq_mask = 4298 ~(I915_ASLE_INTERRUPT | 4299 I915_DISPLAY_PORT_INTERRUPT | 4300 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4301 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4302 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4303 4304 enable_mask = 4305 I915_ASLE_INTERRUPT | 4306 I915_DISPLAY_PORT_INTERRUPT | 4307 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4308 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4309 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 4310 I915_USER_INTERRUPT; 4311 4312 if (IS_G4X(dev_priv)) 4313 enable_mask |= I915_BSD_USER_INTERRUPT; 4314 4315 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4316 4317 /* Interrupt setup is already guaranteed to be single-threaded, this is 4318 * just to make the assert_spin_locked check happy. 
*/
4319 spin_lock_irq(&dev_priv->irq_lock);
4320 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4321 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4322 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4323 spin_unlock_irq(&dev_priv->irq_lock);
4324
4325 i915_enable_asle_pipestat(dev_priv);
4326
4327 return 0;
4328 }
4329
4330 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4331 {
4332 u32 hotplug_en;
4333
4334 lockdep_assert_held(&dev_priv->irq_lock);
4335
4336 /* Note HDMI and DP share hotplug bits */
4337 /* enable bits are the same for all generations */
4338 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4339 /* Programming the CRT detection parameters tends
4340 to generate a spurious hotplug event about three
4341 seconds later. So just do it once.
4342 */
4343 if (IS_G4X(dev_priv))
4344 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4345 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4346
4347 /* Ignore TV since it's buggy */
4348 i915_hotplug_interrupt_update_locked(dev_priv,
4349 HOTPLUG_INT_EN_MASK |
4350 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4351 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4352 hotplug_en);
4353 }
4354
4355 static irqreturn_t i965_irq_handler(int irq, void *arg)
4356 {
4357 struct drm_device *dev = arg;
4358 struct drm_i915_private *dev_priv = to_i915(dev);
4359 irqreturn_t ret = IRQ_NONE;
4360
4361 if (!intel_irqs_enabled(dev_priv))
4362 return IRQ_NONE;
4363
4364 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4365 disable_rpm_wakeref_asserts(dev_priv);
4366
4367 do {
4368 u32 pipe_stats[I915_MAX_PIPES] = {};
4369 u32 hotplug_status = 0;
4370 u32 iir;
4371
4372 iir = I915_READ(IIR);
4373 if (iir == 0)
4374 break;
4375
4376 ret = IRQ_HANDLED;
4377
4378 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4379 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4380
4381 /* Call regardless, as some status bits might not be
4382 * signalled in iir */
4383 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4384
4385 I915_WRITE(IIR, iir);
4386
4387 if (iir & I915_USER_INTERRUPT)
4388 notify_ring(dev_priv->engine[RCS]);
4389
4390 if (iir & I915_BSD_USER_INTERRUPT)
4391 notify_ring(dev_priv->engine[VCS]);
4392
4393 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4394 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4395
4396 if (hotplug_status)
4397 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4398
4399 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4400 } while (0);
4401
4402 enable_rpm_wakeref_asserts(dev_priv);
4403
4404 return ret;
4405 }
4406
4407 /**
4408 * intel_irq_init - initializes irq support
4409 * @dev_priv: i915 device instance
4410 *
4411 * This function initializes all the irq support including work items, timers
4412 * and all the vtables. It does not set up the interrupt itself, though.
4413 */
4414 void intel_irq_init(struct drm_i915_private *dev_priv)
4415 {
4416 struct drm_device *dev = &dev_priv->drm;
4417 struct intel_rps *rps = &dev_priv->gt_pm.rps;
4418 int i;
4419
4420 intel_hpd_init_work(dev_priv);
4421
4422 INIT_WORK(&rps->work, gen6_pm_rps_work);
4423
4424 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4425 for (i = 0; i < MAX_L3_SLICES; ++i)
4426 dev_priv->l3_parity.remap_info[i] = NULL;
4427
4428 if (HAS_GUC_SCHED(dev_priv))
4429 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4430
4431 /* Let's track the enabled rps events */
4432 if (IS_VALLEYVIEW(dev_priv))
4433 /* WaGsvRC0ResidencyMethod:vlv */
4434 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4435 else
4436 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4437
4438 rps->pm_intrmsk_mbz = 0;
4439
4440 /*
4441 * SNB, IVB and HSW can (and VLV and CHV may) hard hang on a looping
4442 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4443 *
4444 * TODO: verify if this can be reproduced on VLV and CHV.
4445 */
4446 if (INTEL_GEN(dev_priv) <= 7)
4447 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
4448
4449 if (INTEL_GEN(dev_priv) >= 8)
4450 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
4451
4452 if (IS_GEN2(dev_priv)) {
4453 /* Gen2 doesn't have a hardware frame counter */
4454 dev->max_vblank_count = 0;
4455 } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
4456 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4457 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4458 } else {
4459 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4460 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4461 }
4462
4463 /*
4464 * Opt out of the vblank disable timer on everything except gen2.
4465 * Gen2 doesn't have a hardware frame counter and so depends on
4466 * vblank interrupts to produce sane vblank sequence numbers.
4467 */
4468 if (!IS_GEN2(dev_priv))
4469 dev->vblank_disable_immediate = true;
4470
4471 /* Most platforms treat the display irq block as an always-on
4472 * power domain. vlv/chv can disable it at runtime and need
4473 * special care to avoid writing any of the display block registers
4474 * outside of the power domain. We defer setting up the display irqs
4475 * in this case to the runtime pm.
4476 */ 4477 dev_priv->display_irqs_enabled = true; 4478 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4479 dev_priv->display_irqs_enabled = false; 4480 4481 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4482 4483 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4484 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4485 4486 if (IS_CHERRYVIEW(dev_priv)) { 4487 dev->driver->irq_handler = cherryview_irq_handler; 4488 dev->driver->irq_preinstall = cherryview_irq_reset; 4489 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4490 dev->driver->irq_uninstall = cherryview_irq_reset; 4491 dev->driver->enable_vblank = i965_enable_vblank; 4492 dev->driver->disable_vblank = i965_disable_vblank; 4493 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4494 } else if (IS_VALLEYVIEW(dev_priv)) { 4495 dev->driver->irq_handler = valleyview_irq_handler; 4496 dev->driver->irq_preinstall = valleyview_irq_reset; 4497 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4498 dev->driver->irq_uninstall = valleyview_irq_reset; 4499 dev->driver->enable_vblank = i965_enable_vblank; 4500 dev->driver->disable_vblank = i965_disable_vblank; 4501 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4502 } else if (INTEL_GEN(dev_priv) >= 11) { 4503 dev->driver->irq_handler = gen11_irq_handler; 4504 dev->driver->irq_preinstall = gen11_irq_reset; 4505 dev->driver->irq_postinstall = gen11_irq_postinstall; 4506 dev->driver->irq_uninstall = gen11_irq_reset; 4507 dev->driver->enable_vblank = gen8_enable_vblank; 4508 dev->driver->disable_vblank = gen8_disable_vblank; 4509 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4510 } else if (INTEL_GEN(dev_priv) >= 8) { 4511 dev->driver->irq_handler = gen8_irq_handler; 4512 dev->driver->irq_preinstall = gen8_irq_reset; 4513 dev->driver->irq_postinstall = gen8_irq_postinstall; 4514 dev->driver->irq_uninstall = gen8_irq_reset; 4515 dev->driver->enable_vblank = gen8_enable_vblank; 4516 dev->driver->disable_vblank = gen8_disable_vblank; 4517 if (IS_GEN9_LP(dev_priv)) 4518 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4519 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 4520 HAS_PCH_CNP(dev_priv)) 4521 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4522 else 4523 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4524 } else if (HAS_PCH_SPLIT(dev_priv)) { 4525 dev->driver->irq_handler = ironlake_irq_handler; 4526 dev->driver->irq_preinstall = ironlake_irq_reset; 4527 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4528 dev->driver->irq_uninstall = ironlake_irq_reset; 4529 dev->driver->enable_vblank = ironlake_enable_vblank; 4530 dev->driver->disable_vblank = ironlake_disable_vblank; 4531 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4532 } else { 4533 if (IS_GEN2(dev_priv)) { 4534 dev->driver->irq_preinstall = i8xx_irq_reset; 4535 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4536 dev->driver->irq_handler = i8xx_irq_handler; 4537 dev->driver->irq_uninstall = i8xx_irq_reset; 4538 dev->driver->enable_vblank = i8xx_enable_vblank; 4539 dev->driver->disable_vblank = i8xx_disable_vblank; 4540 } else if (IS_GEN3(dev_priv)) { 4541 dev->driver->irq_preinstall = i915_irq_reset; 4542 dev->driver->irq_postinstall = i915_irq_postinstall; 4543 dev->driver->irq_uninstall = i915_irq_reset; 4544 dev->driver->irq_handler = i915_irq_handler; 4545 dev->driver->enable_vblank = i8xx_enable_vblank; 4546 dev->driver->disable_vblank = i8xx_disable_vblank; 4547 } else { 4548 
dev->driver->irq_preinstall = i965_irq_reset;
4549 dev->driver->irq_postinstall = i965_irq_postinstall;
4550 dev->driver->irq_uninstall = i965_irq_reset;
4551 dev->driver->irq_handler = i965_irq_handler;
4552 dev->driver->enable_vblank = i965_enable_vblank;
4553 dev->driver->disable_vblank = i965_disable_vblank;
4554 }
4555 if (I915_HAS_HOTPLUG(dev_priv))
4556 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4557 }
4558 }
4559
4560 /**
4561 * intel_irq_fini - deinitializes IRQ support
4562 * @i915: i915 device instance
4563 *
4564 * This function deinitializes all the IRQ support.
4565 */
4566 void intel_irq_fini(struct drm_i915_private *i915)
4567 {
4568 int i;
4569
4570 for (i = 0; i < MAX_L3_SLICES; ++i)
4571 kfree(i915->l3_parity.remap_info[i]);
4572 }
4573
4574 /**
4575 * intel_irq_install - enables the hardware interrupt
4576 * @dev_priv: i915 device instance
4577 *
4578 * This function enables the hardware interrupt handling, but leaves the hotplug
4579 * handling disabled. It is called after intel_irq_init().
4580 *
4581 * In the driver load and resume code we need working interrupts in a few places
4582 * but don't want to deal with the hassle of concurrent probe and hotplug
4583 * workers. Hence the split into two stages.
4584 */
4585 int intel_irq_install(struct drm_i915_private *dev_priv)
4586 {
4587 /*
4588 * We enable some interrupt sources in our postinstall hooks, so mark
4589 * interrupts as enabled _before_ actually enabling them to avoid
4590 * special cases in our ordering checks.
4591 */
4592 dev_priv->runtime_pm.irqs_enabled = true;
4593
4594 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4595 }
4596
4597 /**
4598 * intel_irq_uninstall - finalizes all irq handling
4599 * @dev_priv: i915 device instance
4600 *
4601 * This stops interrupt and hotplug handling and unregisters and frees all
4602 * resources acquired in the init functions.
4603 */
4604 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4605 {
4606 drm_irq_uninstall(&dev_priv->drm);
4607 intel_hpd_cancel_work(dev_priv);
4608 dev_priv->runtime_pm.irqs_enabled = false;
4609 }
4610
4611 /**
4612 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4613 * @dev_priv: i915 device instance
4614 *
4615 * This function is used to disable interrupts at runtime, both in the runtime
4616 * pm and the system suspend/resume code.
4617 */
4618 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4619 {
4620 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4621 dev_priv->runtime_pm.irqs_enabled = false;
4622 synchronize_irq(dev_priv->drm.irq);
4623 }
4624
4625 /**
4626 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4627 * @dev_priv: i915 device instance
4628 *
4629 * This function is used to enable interrupts at runtime, both in the runtime
4630 * pm and the system suspend/resume code.
4631 */
4632 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4633 {
4634 dev_priv->runtime_pm.irqs_enabled = true;
4635 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4636 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4637 }
4638
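/*
 * Illustrative sketch, not driver code: the kernel-doc comments above imply
 * the following call order over a typical load/suspend/resume/unload cycle.
 * The surrounding probe/suspend/remove hooks are assumed context and are not
 * defined in this file.
 *
 *	intel_irq_init(dev_priv);		// work items, timers, irq vtables
 *	intel_irq_install(dev_priv);		// request the interrupt line via drm_irq_install()
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);	// runtime/system suspend
 *	intel_runtime_pm_enable_interrupts(dev_priv);	// runtime/system resume
 *
 *	intel_irq_uninstall(dev_priv);		// driver unload: stop irq and hotplug handling
 *	intel_irq_fini(dev_priv);		// free the l3_parity remap_info allocations
 */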