1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <linux/circ_buf.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /** 41 * DOC: interrupt handling 42 * 43 * These functions provide the basic support for enabling and disabling the 44 * interrupt handling support. There's a lot more functionality in i915_irq.c 45 * and related files, but that will be described in separate chapters. 
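 *
 * Most of the code in this file revolves around the usual triplet of
 * interrupt registers: IMR (mask), IER (enable) and IIR (identity/status).
 * Roughly speaking, IER selects which events may raise an interrupt, IMR
 * masks them off at the last moment, and IIR latches the events that
 * actually fired and must be cleared by writing the bits back.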
46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 /* IIR can theoretically queue up two events. Be paranoid. */ 119 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 121 POSTING_READ(GEN8_##type##_IMR(which)); \ 122 I915_WRITE(GEN8_##type##_IER(which), 0); \ 123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 124 POSTING_READ(GEN8_##type##_IIR(which)); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 } while (0) 128 129 #define GEN5_IRQ_RESET(type) do { \ 130 I915_WRITE(type##IMR, 0xffffffff); \ 131 POSTING_READ(type##IMR); \ 132 I915_WRITE(type##IER, 0); \ 133 I915_WRITE(type##IIR, 0xffffffff); \ 134 POSTING_READ(type##IIR); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 } while (0) 138 139 /* 140 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
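 *
 * As a rough illustration, an invocation such as GEN5_IRQ_RESET(GT)
 * expands (modulo the do/while wrapper) to:
 *
 *	I915_WRITE(GTIMR, 0xffffffff);
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *
 * i.e. mask everything, disable all sources, and clear IIR twice, since
 * IIR can latch a second event behind the first one.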
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent concurrent read-modify-write
 * cycles from interfering with each other, these bits are protected
 * by a spinlock. Since this function is usually not called from a
 * context where the lock is held already, this function acquires the
 * lock itself. A non-locking version is also available.
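 *
 * An illustrative (hypothetical) call, enabling the port B HPD bit from
 * hpd_mask_i915[] above:
 *
 *	i915_hotplug_interrupt_update(dev_priv,
 *				      PORTB_HOTPLUG_INT_EN,
 *				      PORTB_HOTPLUG_INT_EN);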
202 */ 203 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 204 uint32_t mask, 205 uint32_t bits) 206 { 207 spin_lock_irq(&dev_priv->irq_lock); 208 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 209 spin_unlock_irq(&dev_priv->irq_lock); 210 } 211 212 /** 213 * ilk_update_display_irq - update DEIMR 214 * @dev_priv: driver private 215 * @interrupt_mask: mask of interrupt bits to update 216 * @enabled_irq_mask: mask of interrupt bits to enable 217 */ 218 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 219 uint32_t interrupt_mask, 220 uint32_t enabled_irq_mask) 221 { 222 uint32_t new_val; 223 224 assert_spin_locked(&dev_priv->irq_lock); 225 226 WARN_ON(enabled_irq_mask & ~interrupt_mask); 227 228 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 229 return; 230 231 new_val = dev_priv->irq_mask; 232 new_val &= ~interrupt_mask; 233 new_val |= (~enabled_irq_mask & interrupt_mask); 234 235 if (new_val != dev_priv->irq_mask) { 236 dev_priv->irq_mask = new_val; 237 I915_WRITE(DEIMR, dev_priv->irq_mask); 238 POSTING_READ(DEIMR); 239 } 240 } 241 242 /** 243 * ilk_update_gt_irq - update GTIMR 244 * @dev_priv: driver private 245 * @interrupt_mask: mask of interrupt bits to update 246 * @enabled_irq_mask: mask of interrupt bits to enable 247 */ 248 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 249 uint32_t interrupt_mask, 250 uint32_t enabled_irq_mask) 251 { 252 assert_spin_locked(&dev_priv->irq_lock); 253 254 WARN_ON(enabled_irq_mask & ~interrupt_mask); 255 256 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 257 return; 258 259 dev_priv->gt_irq_mask &= ~interrupt_mask; 260 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 261 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 262 } 263 264 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 265 { 266 ilk_update_gt_irq(dev_priv, mask, mask); 267 POSTING_READ_FW(GTIMR); 268 } 269 270 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 271 { 272 ilk_update_gt_irq(dev_priv, mask, 0); 273 } 274 275 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 276 { 277 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 278 } 279 280 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 281 { 282 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 283 } 284 285 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 286 { 287 return INTEL_INFO(dev_priv)->gen >= 8 ? 
		GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	return (mask & ~dev_priv->rps.pm_intr_keep);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
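	 * (Note that cancel_work_sync() below may sleep, so this must be
	 * called from process context, never from the interrupt handler
	 * itself.)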
393 */ 394 cancel_work_sync(&dev_priv->rps.work); 395 gen6_reset_rps_interrupts(dev_priv); 396 } 397 398 /** 399 * bdw_update_port_irq - update DE port interrupt 400 * @dev_priv: driver private 401 * @interrupt_mask: mask of interrupt bits to update 402 * @enabled_irq_mask: mask of interrupt bits to enable 403 */ 404 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 405 uint32_t interrupt_mask, 406 uint32_t enabled_irq_mask) 407 { 408 uint32_t new_val; 409 uint32_t old_val; 410 411 assert_spin_locked(&dev_priv->irq_lock); 412 413 WARN_ON(enabled_irq_mask & ~interrupt_mask); 414 415 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 416 return; 417 418 old_val = I915_READ(GEN8_DE_PORT_IMR); 419 420 new_val = old_val; 421 new_val &= ~interrupt_mask; 422 new_val |= (~enabled_irq_mask & interrupt_mask); 423 424 if (new_val != old_val) { 425 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 426 POSTING_READ(GEN8_DE_PORT_IMR); 427 } 428 } 429 430 /** 431 * bdw_update_pipe_irq - update DE pipe interrupt 432 * @dev_priv: driver private 433 * @pipe: pipe whose interrupt to update 434 * @interrupt_mask: mask of interrupt bits to update 435 * @enabled_irq_mask: mask of interrupt bits to enable 436 */ 437 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 438 enum pipe pipe, 439 uint32_t interrupt_mask, 440 uint32_t enabled_irq_mask) 441 { 442 uint32_t new_val; 443 444 assert_spin_locked(&dev_priv->irq_lock); 445 446 WARN_ON(enabled_irq_mask & ~interrupt_mask); 447 448 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 449 return; 450 451 new_val = dev_priv->de_irq_mask[pipe]; 452 new_val &= ~interrupt_mask; 453 new_val |= (~enabled_irq_mask & interrupt_mask); 454 455 if (new_val != dev_priv->de_irq_mask[pipe]) { 456 dev_priv->de_irq_mask[pipe] = new_val; 457 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 458 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 459 } 460 } 461 462 /** 463 * ibx_display_interrupt_update - update SDEIMR 464 * @dev_priv: driver private 465 * @interrupt_mask: mask of interrupt bits to update 466 * @enabled_irq_mask: mask of interrupt bits to enable 467 */ 468 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 469 uint32_t interrupt_mask, 470 uint32_t enabled_irq_mask) 471 { 472 uint32_t sdeimr = I915_READ(SDEIMR); 473 sdeimr &= ~interrupt_mask; 474 sdeimr |= (~enabled_irq_mask & interrupt_mask); 475 476 WARN_ON(enabled_irq_mask & ~interrupt_mask); 477 478 assert_spin_locked(&dev_priv->irq_lock); 479 480 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 481 return; 482 483 I915_WRITE(SDEIMR, sdeimr); 484 POSTING_READ(SDEIMR); 485 } 486 487 static void 488 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 489 u32 enable_mask, u32 status_mask) 490 { 491 i915_reg_t reg = PIPESTAT(pipe); 492 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 493 494 assert_spin_locked(&dev_priv->irq_lock); 495 WARN_ON(!intel_irqs_enabled(dev_priv)); 496 497 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 498 status_mask & ~PIPESTAT_INT_STATUS_MASK, 499 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 500 pipe_name(pipe), enable_mask, status_mask)) 501 return; 502 503 if ((pipestat & enable_mask) == enable_mask) 504 return; 505 506 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 507 508 /* Enable the interrupt, clear any pending status */ 509 pipestat |= enable_mask | status_mask; 510 I915_WRITE(reg, pipestat); 511 POSTING_READ(reg); 512 } 513 514 static void 515 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 
516 u32 enable_mask, u32 status_mask) 517 { 518 i915_reg_t reg = PIPESTAT(pipe); 519 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 520 521 assert_spin_locked(&dev_priv->irq_lock); 522 WARN_ON(!intel_irqs_enabled(dev_priv)); 523 524 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 525 status_mask & ~PIPESTAT_INT_STATUS_MASK, 526 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 527 pipe_name(pipe), enable_mask, status_mask)) 528 return; 529 530 if ((pipestat & enable_mask) == 0) 531 return; 532 533 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 534 535 pipestat &= ~enable_mask; 536 I915_WRITE(reg, pipestat); 537 POSTING_READ(reg); 538 } 539 540 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 541 { 542 u32 enable_mask = status_mask << 16; 543 544 /* 545 * On pipe A we don't support the PSR interrupt yet, 546 * on pipe B and C the same bit MBZ. 547 */ 548 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 549 return 0; 550 /* 551 * On pipe B and C we don't support the PSR interrupt yet, on pipe 552 * A the same bit is for perf counters which we don't use either. 553 */ 554 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 555 return 0; 556 557 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 558 SPRITE0_FLIP_DONE_INT_EN_VLV | 559 SPRITE1_FLIP_DONE_INT_EN_VLV); 560 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 561 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 562 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 563 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 564 565 return enable_mask; 566 } 567 568 void 569 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 570 u32 status_mask) 571 { 572 u32 enable_mask; 573 574 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 575 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 576 status_mask); 577 else 578 enable_mask = status_mask << 16; 579 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 580 } 581 582 void 583 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 584 u32 status_mask) 585 { 586 u32 enable_mask; 587 588 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 589 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 590 status_mask); 591 else 592 enable_mask = status_mask << 16; 593 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 594 } 595 596 /** 597 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 598 * @dev_priv: i915 device private 599 */ 600 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 601 { 602 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 603 return; 604 605 spin_lock_irq(&dev_priv->irq_lock); 606 607 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 608 if (INTEL_GEN(dev_priv) >= 4) 609 i915_enable_pipestat(dev_priv, PIPE_A, 610 PIPE_LEGACY_BLC_EVENT_STATUS); 611 612 spin_unlock_irq(&dev_priv->irq_lock); 613 } 614 615 /* 616 * This timing diagram depicts the video signal in and 617 * around the vertical blanking period. 618 * 619 * Assumptions about the fictitious mode used in this example: 620 * vblank_start >= 3 621 * vsync_start = vblank_start + 1 622 * vsync_end = vblank_start + 2 623 * vtotal = vblank_start + 3 624 * 625 * start of vblank: 626 * latch double buffered registers 627 * increment frame counter (ctg+) 628 * generate start of vblank interrupt (gen4+) 629 * | 630 * | frame start: 631 * | generate frame start interrupt (aka. 
vblank interrupt) (gmch) 632 * | may be shifted forward 1-3 extra lines via PIPECONF 633 * | | 634 * | | start of vsync: 635 * | | generate vsync interrupt 636 * | | | 637 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 638 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 639 * ----va---> <-----------------vb--------------------> <--------va------------- 640 * | | <----vs-----> | 641 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 642 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 643 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 644 * | | | 645 * last visible pixel first visible pixel 646 * | increment frame counter (gen3/4) 647 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 648 * 649 * x = horizontal active 650 * _ = horizontal blanking 651 * hs = horizontal sync 652 * va = vertical active 653 * vb = vertical blanking 654 * vs = vertical sync 655 * vbs = vblank_start (number) 656 * 657 * Summary: 658 * - most events happen at the start of horizontal sync 659 * - frame start happens at the start of horizontal blank, 1-4 lines 660 * (depending on PIPECONF settings) after the start of vblank 661 * - gen3/4 pixel and frame counter are synchronized with the start 662 * of horizontal active on the first line of vertical active 663 */ 664 665 /* Called from drm generic code, passed a 'crtc', which 666 * we use as a pipe index 667 */ 668 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 669 { 670 struct drm_i915_private *dev_priv = to_i915(dev); 671 i915_reg_t high_frame, low_frame; 672 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 673 struct intel_crtc *intel_crtc = 674 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 675 const struct drm_display_mode *mode = &intel_crtc->base.hwmode; 676 677 htotal = mode->crtc_htotal; 678 hsync_start = mode->crtc_hsync_start; 679 vbl_start = mode->crtc_vblank_start; 680 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 681 vbl_start = DIV_ROUND_UP(vbl_start, 2); 682 683 /* Convert to pixel count */ 684 vbl_start *= htotal; 685 686 /* Start of vblank event occurs at start of hsync */ 687 vbl_start -= htotal - hsync_start; 688 689 high_frame = PIPEFRAME(pipe); 690 low_frame = PIPEFRAMEPIXEL(pipe); 691 692 /* 693 * High & low register fields aren't synchronized, so make sure 694 * we get a low value that's stable across two reads of the high 695 * register. 696 */ 697 do { 698 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 699 low = I915_READ(low_frame); 700 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 701 } while (high1 != high2); 702 703 high1 >>= PIPE_FRAME_HIGH_SHIFT; 704 pixel = low & PIPE_PIXEL_MASK; 705 low >>= PIPE_FRAME_LOW_SHIFT; 706 707 /* 708 * The frame counter increments at beginning of active. 709 * Cook up a vblank counter by also checking the pixel 710 * counter against vblank start. 711 */ 712 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 713 } 714 715 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 716 { 717 struct drm_i915_private *dev_priv = to_i915(dev); 718 719 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 720 } 721 722 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. 
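 *
 * The _FW variants skip the forcewake bookkeeping entirely, which is what
 * makes them cheap enough for the timing-critical scanline queries below;
 * the callers are expected to hold uncore.lock around these reads.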
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
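		 *
		 * (Once the position has been adjusted to vblank-relative
		 * coordinates, the vertical/horizontal split at the end of
		 * this function simply amounts to:
		 *
		 *	vpos = position / htotal;
		 *	hpos = position % htotal;
		 * )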
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To prevent
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix
		 * that, always add htotal-hsync_start to the current pixel
		 * position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank?
*/ 891 if (in_vbl) 892 ret |= DRM_SCANOUTPOS_IN_VBLANK; 893 894 return ret; 895 } 896 897 int intel_get_crtc_scanline(struct intel_crtc *crtc) 898 { 899 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 900 unsigned long irqflags; 901 int position; 902 903 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 904 position = __intel_get_crtc_scanline(crtc); 905 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 906 907 return position; 908 } 909 910 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, 911 int *max_error, 912 struct timeval *vblank_time, 913 unsigned flags) 914 { 915 struct drm_crtc *crtc; 916 917 if (pipe >= INTEL_INFO(dev)->num_pipes) { 918 DRM_ERROR("Invalid crtc %u\n", pipe); 919 return -EINVAL; 920 } 921 922 /* Get drm_crtc to timestamp: */ 923 crtc = intel_get_crtc_for_pipe(dev, pipe); 924 if (crtc == NULL) { 925 DRM_ERROR("Invalid crtc %u\n", pipe); 926 return -EINVAL; 927 } 928 929 if (!crtc->hwmode.crtc_clock) { 930 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe); 931 return -EBUSY; 932 } 933 934 /* Helper routine in DRM core does all the work: */ 935 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 936 vblank_time, flags, 937 &crtc->hwmode); 938 } 939 940 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 941 { 942 u32 busy_up, busy_down, max_avg, min_avg; 943 u8 new_delay; 944 945 spin_lock(&mchdev_lock); 946 947 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 948 949 new_delay = dev_priv->ips.cur_delay; 950 951 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 952 busy_up = I915_READ(RCPREVBSYTUPAVG); 953 busy_down = I915_READ(RCPREVBSYTDNAVG); 954 max_avg = I915_READ(RCBMAXAVG); 955 min_avg = I915_READ(RCBMINAVG); 956 957 /* Handle RCS change request from hw */ 958 if (busy_up > max_avg) { 959 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 960 new_delay = dev_priv->ips.cur_delay - 1; 961 if (new_delay < dev_priv->ips.max_delay) 962 new_delay = dev_priv->ips.max_delay; 963 } else if (busy_down < min_avg) { 964 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 965 new_delay = dev_priv->ips.cur_delay + 1; 966 if (new_delay > dev_priv->ips.min_delay) 967 new_delay = dev_priv->ips.min_delay; 968 } 969 970 if (ironlake_set_drps(dev_priv, new_delay)) 971 dev_priv->ips.cur_delay = new_delay; 972 973 spin_unlock(&mchdev_lock); 974 975 return; 976 } 977 978 static void notify_ring(struct intel_engine_cs *engine) 979 { 980 smp_store_mb(engine->breadcrumbs.irq_posted, true); 981 if (intel_engine_wakeup(engine)) 982 trace_i915_gem_request_notify(engine); 983 } 984 985 static void vlv_c0_read(struct drm_i915_private *dev_priv, 986 struct intel_rps_ei *ei) 987 { 988 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 989 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 990 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 991 } 992 993 static bool vlv_c0_above(struct drm_i915_private *dev_priv, 994 const struct intel_rps_ei *old, 995 const struct intel_rps_ei *now, 996 int threshold) 997 { 998 u64 time, c0; 999 unsigned int mul = 100; 1000 1001 if (old->cz_clock == 0) 1002 return false; 1003 1004 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 1005 mul <<= 8; 1006 1007 time = now->cz_clock - old->cz_clock; 1008 time *= threshold * dev_priv->czclk_freq; 1009 1010 /* Workload can be split between render + media, e.g. SwapBuffers 1011 * being blitted in X after being rendered in mesa. 
To account for 1012 * this we need to combine both engines into our activity counter. 1013 */ 1014 c0 = now->render_c0 - old->render_c0; 1015 c0 += now->media_c0 - old->media_c0; 1016 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; 1017 1018 return c0 >= time; 1019 } 1020 1021 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1022 { 1023 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 1024 dev_priv->rps.up_ei = dev_priv->rps.down_ei; 1025 } 1026 1027 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1028 { 1029 struct intel_rps_ei now; 1030 u32 events = 0; 1031 1032 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 1033 return 0; 1034 1035 vlv_c0_read(dev_priv, &now); 1036 if (now.cz_clock == 0) 1037 return 0; 1038 1039 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 1040 if (!vlv_c0_above(dev_priv, 1041 &dev_priv->rps.down_ei, &now, 1042 dev_priv->rps.down_threshold)) 1043 events |= GEN6_PM_RP_DOWN_THRESHOLD; 1044 dev_priv->rps.down_ei = now; 1045 } 1046 1047 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1048 if (vlv_c0_above(dev_priv, 1049 &dev_priv->rps.up_ei, &now, 1050 dev_priv->rps.up_threshold)) 1051 events |= GEN6_PM_RP_UP_THRESHOLD; 1052 dev_priv->rps.up_ei = now; 1053 } 1054 1055 return events; 1056 } 1057 1058 static bool any_waiters(struct drm_i915_private *dev_priv) 1059 { 1060 struct intel_engine_cs *engine; 1061 enum intel_engine_id id; 1062 1063 for_each_engine(engine, dev_priv, id) 1064 if (intel_engine_has_waiter(engine)) 1065 return true; 1066 1067 return false; 1068 } 1069 1070 static void gen6_pm_rps_work(struct work_struct *work) 1071 { 1072 struct drm_i915_private *dev_priv = 1073 container_of(work, struct drm_i915_private, rps.work); 1074 bool client_boost; 1075 int new_delay, adj, min, max; 1076 u32 pm_iir; 1077 1078 spin_lock_irq(&dev_priv->irq_lock); 1079 /* Speed up work cancelation during disabling rps interrupts. */ 1080 if (!dev_priv->rps.interrupts_enabled) { 1081 spin_unlock_irq(&dev_priv->irq_lock); 1082 return; 1083 } 1084 1085 pm_iir = dev_priv->rps.pm_iir; 1086 dev_priv->rps.pm_iir = 0; 1087 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1088 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1089 client_boost = dev_priv->rps.client_boost; 1090 dev_priv->rps.client_boost = false; 1091 spin_unlock_irq(&dev_priv->irq_lock); 1092 1093 /* Make sure we didn't queue anything we're not going to process. */ 1094 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1095 1096 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1097 return; 1098 1099 mutex_lock(&dev_priv->rps.hw_lock); 1100 1101 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1102 1103 adj = dev_priv->rps.last_adj; 1104 new_delay = dev_priv->rps.cur_freq; 1105 min = dev_priv->rps.min_freq_softlimit; 1106 max = dev_priv->rps.max_freq_softlimit; 1107 if (client_boost || any_waiters(dev_priv)) 1108 max = dev_priv->rps.max_freq; 1109 if (client_boost && new_delay < dev_priv->rps.boost_freq) { 1110 new_delay = dev_priv->rps.boost_freq; 1111 adj = 0; 1112 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1113 if (adj > 0) 1114 adj *= 2; 1115 else /* CHV needs even encode values */ 1116 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1117 /* 1118 * For better performance, jump directly 1119 * to RPe if we're below it. 
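		 * (RPe is the "efficient" frequency tracked in
		 * dev_priv->rps.efficient_freq.)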
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (client_boost || any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * it is more likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
1177 */ 1178 mutex_lock(&dev_priv->drm.struct_mutex); 1179 1180 /* If we've screwed up tracking, just let the interrupt fire again */ 1181 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1182 goto out; 1183 1184 misccpctl = I915_READ(GEN7_MISCCPCTL); 1185 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1186 POSTING_READ(GEN7_MISCCPCTL); 1187 1188 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1189 i915_reg_t reg; 1190 1191 slice--; 1192 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1193 break; 1194 1195 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1196 1197 reg = GEN7_L3CDERRST1(slice); 1198 1199 error_status = I915_READ(reg); 1200 row = GEN7_PARITY_ERROR_ROW(error_status); 1201 bank = GEN7_PARITY_ERROR_BANK(error_status); 1202 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1203 1204 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1205 POSTING_READ(reg); 1206 1207 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1208 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1209 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1210 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1211 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1212 parity_event[5] = NULL; 1213 1214 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1215 KOBJ_CHANGE, parity_event); 1216 1217 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1218 slice, row, bank, subbank); 1219 1220 kfree(parity_event[4]); 1221 kfree(parity_event[3]); 1222 kfree(parity_event[2]); 1223 kfree(parity_event[1]); 1224 } 1225 1226 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1227 1228 out: 1229 WARN_ON(dev_priv->l3_parity.which_slice); 1230 spin_lock_irq(&dev_priv->irq_lock); 1231 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1232 spin_unlock_irq(&dev_priv->irq_lock); 1233 1234 mutex_unlock(&dev_priv->drm.struct_mutex); 1235 } 1236 1237 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1238 u32 iir) 1239 { 1240 if (!HAS_L3_DPF(dev_priv)) 1241 return; 1242 1243 spin_lock(&dev_priv->irq_lock); 1244 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1245 spin_unlock(&dev_priv->irq_lock); 1246 1247 iir &= GT_PARITY_ERROR(dev_priv); 1248 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1249 dev_priv->l3_parity.which_slice |= 1 << 1; 1250 1251 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1252 dev_priv->l3_parity.which_slice |= 1 << 0; 1253 1254 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1255 } 1256 1257 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1258 u32 gt_iir) 1259 { 1260 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1261 notify_ring(dev_priv->engine[RCS]); 1262 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1263 notify_ring(dev_priv->engine[VCS]); 1264 } 1265 1266 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1267 u32 gt_iir) 1268 { 1269 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1270 notify_ring(dev_priv->engine[RCS]); 1271 if (gt_iir & GT_BSD_USER_INTERRUPT) 1272 notify_ring(dev_priv->engine[VCS]); 1273 if (gt_iir & GT_BLT_USER_INTERRUPT) 1274 notify_ring(dev_priv->engine[BCS]); 1275 1276 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1277 GT_BSD_CS_ERROR_INTERRUPT | 1278 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1279 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1280 1281 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1282 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1283 } 1284 1285 static __always_inline void 1286 
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1287 { 1288 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) 1289 notify_ring(engine); 1290 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) 1291 tasklet_schedule(&engine->irq_tasklet); 1292 } 1293 1294 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1295 u32 master_ctl, 1296 u32 gt_iir[4]) 1297 { 1298 irqreturn_t ret = IRQ_NONE; 1299 1300 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1301 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1302 if (gt_iir[0]) { 1303 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1304 ret = IRQ_HANDLED; 1305 } else 1306 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1307 } 1308 1309 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1310 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1311 if (gt_iir[1]) { 1312 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1313 ret = IRQ_HANDLED; 1314 } else 1315 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1316 } 1317 1318 if (master_ctl & GEN8_GT_VECS_IRQ) { 1319 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1320 if (gt_iir[3]) { 1321 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1322 ret = IRQ_HANDLED; 1323 } else 1324 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1325 } 1326 1327 if (master_ctl & GEN8_GT_PM_IRQ) { 1328 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1329 if (gt_iir[2] & dev_priv->pm_rps_events) { 1330 I915_WRITE_FW(GEN8_GT_IIR(2), 1331 gt_iir[2] & dev_priv->pm_rps_events); 1332 ret = IRQ_HANDLED; 1333 } else 1334 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1335 } 1336 1337 return ret; 1338 } 1339 1340 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1341 u32 gt_iir[4]) 1342 { 1343 if (gt_iir[0]) { 1344 gen8_cs_irq_handler(dev_priv->engine[RCS], 1345 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1346 gen8_cs_irq_handler(dev_priv->engine[BCS], 1347 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1348 } 1349 1350 if (gt_iir[1]) { 1351 gen8_cs_irq_handler(dev_priv->engine[VCS], 1352 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1353 gen8_cs_irq_handler(dev_priv->engine[VCS2], 1354 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1355 } 1356 1357 if (gt_iir[3]) 1358 gen8_cs_irq_handler(dev_priv->engine[VECS], 1359 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1360 1361 if (gt_iir[2] & dev_priv->pm_rps_events) 1362 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1363 } 1364 1365 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1366 { 1367 switch (port) { 1368 case PORT_A: 1369 return val & PORTA_HOTPLUG_LONG_DETECT; 1370 case PORT_B: 1371 return val & PORTB_HOTPLUG_LONG_DETECT; 1372 case PORT_C: 1373 return val & PORTC_HOTPLUG_LONG_DETECT; 1374 default: 1375 return false; 1376 } 1377 } 1378 1379 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1380 { 1381 switch (port) { 1382 case PORT_E: 1383 return val & PORTE_HOTPLUG_LONG_DETECT; 1384 default: 1385 return false; 1386 } 1387 } 1388 1389 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1390 { 1391 switch (port) { 1392 case PORT_A: 1393 return val & PORTA_HOTPLUG_LONG_DETECT; 1394 case PORT_B: 1395 return val & PORTB_HOTPLUG_LONG_DETECT; 1396 case PORT_C: 1397 return val & PORTC_HOTPLUG_LONG_DETECT; 1398 case PORT_D: 1399 return val & PORTD_HOTPLUG_LONG_DETECT; 1400 default: 1401 return false; 1402 } 1403 } 1404 1405 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1406 { 1407 switch (port) { 1408 case PORT_A: 1409 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1410 default: 1411 return false; 1412 } 1413 } 1414 1415 
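/*
 * The *_port_hotplug_long_detect() helpers above and below all share the
 * same shape: given a port and the latched hotplug register value, decide
 * whether the pulse was "long" (a real plug/unplug) rather than a short
 * HPD pulse. Each one is handed to intel_get_hpd_pins() further below as
 * its long_pulse_detect() callback, together with the matching hpd_*[]
 * table, e.g. (illustrative sketch only):
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 *			   dig_hotplug_reg, hpd_ibx,
 *			   pch_port_hotplug_long_detect);
 */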
static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1416 { 1417 switch (port) { 1418 case PORT_B: 1419 return val & PORTB_HOTPLUG_LONG_DETECT; 1420 case PORT_C: 1421 return val & PORTC_HOTPLUG_LONG_DETECT; 1422 case PORT_D: 1423 return val & PORTD_HOTPLUG_LONG_DETECT; 1424 default: 1425 return false; 1426 } 1427 } 1428 1429 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1430 { 1431 switch (port) { 1432 case PORT_B: 1433 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1434 case PORT_C: 1435 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1436 case PORT_D: 1437 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1438 default: 1439 return false; 1440 } 1441 } 1442 1443 /* 1444 * Get a bit mask of pins that have triggered, and which ones may be long. 1445 * This can be called multiple times with the same masks to accumulate 1446 * hotplug detection results from several registers. 1447 * 1448 * Note that the caller is expected to zero out the masks initially. 1449 */ 1450 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1451 u32 hotplug_trigger, u32 dig_hotplug_reg, 1452 const u32 hpd[HPD_NUM_PINS], 1453 bool long_pulse_detect(enum port port, u32 val)) 1454 { 1455 enum port port; 1456 int i; 1457 1458 for_each_hpd_pin(i) { 1459 if ((hpd[i] & hotplug_trigger) == 0) 1460 continue; 1461 1462 *pin_mask |= BIT(i); 1463 1464 if (!intel_hpd_pin_to_port(i, &port)) 1465 continue; 1466 1467 if (long_pulse_detect(port, dig_hotplug_reg)) 1468 *long_mask |= BIT(i); 1469 } 1470 1471 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1472 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1473 1474 } 1475 1476 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1477 { 1478 wake_up_all(&dev_priv->gmbus_wait_queue); 1479 } 1480 1481 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1482 { 1483 wake_up_all(&dev_priv->gmbus_wait_queue); 1484 } 1485 1486 #if defined(CONFIG_DEBUG_FS) 1487 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1488 enum pipe pipe, 1489 uint32_t crc0, uint32_t crc1, 1490 uint32_t crc2, uint32_t crc3, 1491 uint32_t crc4) 1492 { 1493 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1494 struct intel_pipe_crc_entry *entry; 1495 int head, tail; 1496 1497 spin_lock(&pipe_crc->lock); 1498 1499 if (!pipe_crc->entries) { 1500 spin_unlock(&pipe_crc->lock); 1501 DRM_DEBUG_KMS("spurious interrupt\n"); 1502 return; 1503 } 1504 1505 head = pipe_crc->head; 1506 tail = pipe_crc->tail; 1507 1508 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1509 spin_unlock(&pipe_crc->lock); 1510 DRM_ERROR("CRC buffer overflowing\n"); 1511 return; 1512 } 1513 1514 entry = &pipe_crc->entries[head]; 1515 1516 entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, 1517 pipe); 1518 entry->crc[0] = crc0; 1519 entry->crc[1] = crc1; 1520 entry->crc[2] = crc2; 1521 entry->crc[3] = crc3; 1522 entry->crc[4] = crc4; 1523 1524 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1525 pipe_crc->head = head; 1526 1527 spin_unlock(&pipe_crc->lock); 1528 1529 wake_up_interruptible(&pipe_crc->wq); 1530 } 1531 #else 1532 static inline void 1533 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1534 enum pipe pipe, 1535 uint32_t crc0, uint32_t crc1, 1536 uint32_t crc2, uint32_t crc3, 1537 uint32_t crc4) {} 1538 #endif 1539 1540 1541 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1542 enum pipe pipe) 1543 { 1544 display_pipe_crc_irq_handler(dev_priv, 
pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler.
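		 * The status bit is still always included in the ack mask
		 * below; it is just reported via the dedicated
		 * intel_cpu_fifo_underrun_irq_handler() path instead.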
*/ 1646 mask = PIPE_FIFO_UNDERRUN_STATUS; 1647 1648 switch (pipe) { 1649 case PIPE_A: 1650 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1651 break; 1652 case PIPE_B: 1653 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1654 break; 1655 case PIPE_C: 1656 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1657 break; 1658 } 1659 if (iir & iir_bit) 1660 mask |= dev_priv->pipestat_irq_mask[pipe]; 1661 1662 if (!mask) 1663 continue; 1664 1665 reg = PIPESTAT(pipe); 1666 mask |= PIPESTAT_INT_ENABLE_MASK; 1667 pipe_stats[pipe] = I915_READ(reg) & mask; 1668 1669 /* 1670 * Clear the PIPE*STAT regs before the IIR 1671 */ 1672 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1673 PIPESTAT_INT_STATUS_MASK)) 1674 I915_WRITE(reg, pipe_stats[pipe]); 1675 } 1676 spin_unlock(&dev_priv->irq_lock); 1677 } 1678 1679 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1680 u32 pipe_stats[I915_MAX_PIPES]) 1681 { 1682 enum pipe pipe; 1683 1684 for_each_pipe(dev_priv, pipe) { 1685 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1686 intel_pipe_handle_vblank(dev_priv, pipe)) 1687 intel_check_page_flip(dev_priv, pipe); 1688 1689 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 1690 intel_finish_page_flip_cs(dev_priv, pipe); 1691 1692 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1693 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1694 1695 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1696 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1697 } 1698 1699 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1700 gmbus_irq_handler(dev_priv); 1701 } 1702 1703 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1704 { 1705 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1706 1707 if (hotplug_status) 1708 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1709 1710 return hotplug_status; 1711 } 1712 1713 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1714 u32 hotplug_status) 1715 { 1716 u32 pin_mask = 0, long_mask = 0; 1717 1718 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1719 IS_CHERRYVIEW(dev_priv)) { 1720 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1721 1722 if (hotplug_trigger) { 1723 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1724 hotplug_trigger, hpd_status_g4x, 1725 i9xx_port_hotplug_long_detect); 1726 1727 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1728 } 1729 1730 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1731 dp_aux_irq_handler(dev_priv); 1732 } else { 1733 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1734 1735 if (hotplug_trigger) { 1736 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1737 hotplug_trigger, hpd_status_i915, 1738 i9xx_port_hotplug_long_detect); 1739 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1740 } 1741 } 1742 } 1743 1744 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1745 { 1746 struct drm_device *dev = arg; 1747 struct drm_i915_private *dev_priv = to_i915(dev); 1748 irqreturn_t ret = IRQ_NONE; 1749 1750 if (!intel_irqs_enabled(dev_priv)) 1751 return IRQ_NONE; 1752 1753 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1754 disable_rpm_wakeref_asserts(dev_priv); 1755 1756 do { 1757 u32 iir, gt_iir, pm_iir; 1758 u32 pipe_stats[I915_MAX_PIPES] = {}; 1759 u32 hotplug_status = 0; 1760 u32 ier = 0; 1761 1762 gt_iir = I915_READ(GTIIR); 1763 pm_iir = I915_READ(GEN6_PMIIR); 1764 iir = I915_READ(VLV_IIR); 1765 1766 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1767 break; 1768 
1769 ret = IRQ_HANDLED; 1770 1771 /* 1772 * Theory on interrupt generation, based on empirical evidence: 1773 * 1774 * x = ((VLV_IIR & VLV_IER) || 1775 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1776 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1777 * 1778 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1779 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1780 * guarantee the CPU interrupt will be raised again even if we 1781 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1782 * bits this time around. 1783 */ 1784 I915_WRITE(VLV_MASTER_IER, 0); 1785 ier = I915_READ(VLV_IER); 1786 I915_WRITE(VLV_IER, 0); 1787 1788 if (gt_iir) 1789 I915_WRITE(GTIIR, gt_iir); 1790 if (pm_iir) 1791 I915_WRITE(GEN6_PMIIR, pm_iir); 1792 1793 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1794 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1795 1796 /* Call regardless, as some status bits might not be 1797 * signalled in iir */ 1798 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1799 1800 /* 1801 * VLV_IIR is single buffered, and reflects the level 1802 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1803 */ 1804 if (iir) 1805 I915_WRITE(VLV_IIR, iir); 1806 1807 I915_WRITE(VLV_IER, ier); 1808 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1809 POSTING_READ(VLV_MASTER_IER); 1810 1811 if (gt_iir) 1812 snb_gt_irq_handler(dev_priv, gt_iir); 1813 if (pm_iir) 1814 gen6_rps_irq_handler(dev_priv, pm_iir); 1815 1816 if (hotplug_status) 1817 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1818 1819 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1820 } while (0); 1821 1822 enable_rpm_wakeref_asserts(dev_priv); 1823 1824 return ret; 1825 } 1826 1827 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1828 { 1829 struct drm_device *dev = arg; 1830 struct drm_i915_private *dev_priv = to_i915(dev); 1831 irqreturn_t ret = IRQ_NONE; 1832 1833 if (!intel_irqs_enabled(dev_priv)) 1834 return IRQ_NONE; 1835 1836 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1837 disable_rpm_wakeref_asserts(dev_priv); 1838 1839 do { 1840 u32 master_ctl, iir; 1841 u32 gt_iir[4] = {}; 1842 u32 pipe_stats[I915_MAX_PIPES] = {}; 1843 u32 hotplug_status = 0; 1844 u32 ier = 0; 1845 1846 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1847 iir = I915_READ(VLV_IIR); 1848 1849 if (master_ctl == 0 && iir == 0) 1850 break; 1851 1852 ret = IRQ_HANDLED; 1853 1854 /* 1855 * Theory on interrupt generation, based on empirical evidence: 1856 * 1857 * x = ((VLV_IIR & VLV_IER) || 1858 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1859 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1860 * 1861 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1862 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1863 * guarantee the CPU interrupt will be raised again even if we 1864 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1865 * bits this time around. 1866 */ 1867 I915_WRITE(GEN8_MASTER_IRQ, 0); 1868 ier = I915_READ(VLV_IER); 1869 I915_WRITE(VLV_IER, 0); 1870 1871 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 1872 1873 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1874 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1875 1876 /* Call regardless, as some status bits might not be 1877 * signalled in iir */ 1878 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1879 1880 /* 1881 * VLV_IIR is single buffered, and reflects the level 1882 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
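		 * (Clearing VLV_IIR first would not stick: with the
		 * underlying PIPESTAT/PORT_HOTPLUG_STAT bits still set, the
		 * IIR bits would simply reassert.)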
1883 */ 1884 if (iir) 1885 I915_WRITE(VLV_IIR, iir); 1886 1887 I915_WRITE(VLV_IER, ier); 1888 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1889 POSTING_READ(GEN8_MASTER_IRQ); 1890 1891 gen8_gt_irq_handler(dev_priv, gt_iir); 1892 1893 if (hotplug_status) 1894 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1895 1896 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1897 } while (0); 1898 1899 enable_rpm_wakeref_asserts(dev_priv); 1900 1901 return ret; 1902 } 1903 1904 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1905 u32 hotplug_trigger, 1906 const u32 hpd[HPD_NUM_PINS]) 1907 { 1908 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1909 1910 /* 1911 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 1912 * unless we touch the hotplug register, even if hotplug_trigger is 1913 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1914 * errors. 1915 */ 1916 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1917 if (!hotplug_trigger) { 1918 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1919 PORTD_HOTPLUG_STATUS_MASK | 1920 PORTC_HOTPLUG_STATUS_MASK | 1921 PORTB_HOTPLUG_STATUS_MASK; 1922 dig_hotplug_reg &= ~mask; 1923 } 1924 1925 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1926 if (!hotplug_trigger) 1927 return; 1928 1929 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1930 dig_hotplug_reg, hpd, 1931 pch_port_hotplug_long_detect); 1932 1933 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1934 } 1935 1936 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1937 { 1938 int pipe; 1939 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1940 1941 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 1942 1943 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1944 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1945 SDE_AUDIO_POWER_SHIFT); 1946 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1947 port_name(port)); 1948 } 1949 1950 if (pch_iir & SDE_AUX_MASK) 1951 dp_aux_irq_handler(dev_priv); 1952 1953 if (pch_iir & SDE_GMBUS) 1954 gmbus_irq_handler(dev_priv); 1955 1956 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1957 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1958 1959 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1960 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1961 1962 if (pch_iir & SDE_POISON) 1963 DRM_ERROR("PCH poison interrupt\n"); 1964 1965 if (pch_iir & SDE_FDI_MASK) 1966 for_each_pipe(dev_priv, pipe) 1967 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1968 pipe_name(pipe), 1969 I915_READ(FDI_RX_IIR(pipe))); 1970 1971 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1972 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1973 1974 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1975 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1976 1977 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1978 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1979 1980 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1981 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1982 } 1983 1984 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 1985 { 1986 u32 err_int = I915_READ(GEN7_ERR_INT); 1987 enum pipe pipe; 1988 1989 if (err_int & ERR_INT_POISON) 1990 DRM_ERROR("Poison interrupt\n"); 1991 1992 for_each_pipe(dev_priv, pipe) { 1993 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1994 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1995 1996 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1997 if (IS_IVYBRIDGE(dev_priv)) 1998 ivb_pipe_crc_irq_handler(dev_priv, 
pipe); 1999 else 2000 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2001 } 2002 } 2003 2004 I915_WRITE(GEN7_ERR_INT, err_int); 2005 } 2006 2007 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2008 { 2009 u32 serr_int = I915_READ(SERR_INT); 2010 2011 if (serr_int & SERR_INT_POISON) 2012 DRM_ERROR("PCH poison interrupt\n"); 2013 2014 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2015 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2016 2017 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2018 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2019 2020 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2021 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2022 2023 I915_WRITE(SERR_INT, serr_int); 2024 } 2025 2026 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2027 { 2028 int pipe; 2029 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2030 2031 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2032 2033 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2034 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2035 SDE_AUDIO_POWER_SHIFT_CPT); 2036 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2037 port_name(port)); 2038 } 2039 2040 if (pch_iir & SDE_AUX_MASK_CPT) 2041 dp_aux_irq_handler(dev_priv); 2042 2043 if (pch_iir & SDE_GMBUS_CPT) 2044 gmbus_irq_handler(dev_priv); 2045 2046 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2047 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2048 2049 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2050 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2051 2052 if (pch_iir & SDE_FDI_MASK_CPT) 2053 for_each_pipe(dev_priv, pipe) 2054 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2055 pipe_name(pipe), 2056 I915_READ(FDI_RX_IIR(pipe))); 2057 2058 if (pch_iir & SDE_ERROR_CPT) 2059 cpt_serr_int_handler(dev_priv); 2060 } 2061 2062 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2063 { 2064 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2065 ~SDE_PORTE_HOTPLUG_SPT; 2066 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2067 u32 pin_mask = 0, long_mask = 0; 2068 2069 if (hotplug_trigger) { 2070 u32 dig_hotplug_reg; 2071 2072 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2073 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2074 2075 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2076 dig_hotplug_reg, hpd_spt, 2077 spt_port_hotplug_long_detect); 2078 } 2079 2080 if (hotplug2_trigger) { 2081 u32 dig_hotplug_reg; 2082 2083 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2084 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2085 2086 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2087 dig_hotplug_reg, hpd_spt, 2088 spt_port_hotplug2_long_detect); 2089 } 2090 2091 if (pin_mask) 2092 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2093 2094 if (pch_iir & SDE_GMBUS_CPT) 2095 gmbus_irq_handler(dev_priv); 2096 } 2097 2098 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2099 u32 hotplug_trigger, 2100 const u32 hpd[HPD_NUM_PINS]) 2101 { 2102 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2103 2104 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2105 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2106 2107 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2108 dig_hotplug_reg, hpd, 2109 ilk_port_hotplug_long_detect); 2110 2111 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2112 } 2113 2114 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2115 u32 de_iir) 2116 { 
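/* Demultiplex the ILK/SNB display-engine IIR: DP A hotplug, AUX channel A, opregion ASLE, poison, per-pipe vblank/FIFO-underrun/CRC/flip-done, and any chained PCH (SDEIIR) or PCU events. */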
2117 enum pipe pipe; 2118 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2119 2120 if (hotplug_trigger) 2121 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2122 2123 if (de_iir & DE_AUX_CHANNEL_A) 2124 dp_aux_irq_handler(dev_priv); 2125 2126 if (de_iir & DE_GSE) 2127 intel_opregion_asle_intr(dev_priv); 2128 2129 if (de_iir & DE_POISON) 2130 DRM_ERROR("Poison interrupt\n"); 2131 2132 for_each_pipe(dev_priv, pipe) { 2133 if (de_iir & DE_PIPE_VBLANK(pipe) && 2134 intel_pipe_handle_vblank(dev_priv, pipe)) 2135 intel_check_page_flip(dev_priv, pipe); 2136 2137 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2138 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2139 2140 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2141 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2142 2143 /* plane/pipes map 1:1 on ilk+ */ 2144 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2145 intel_finish_page_flip_cs(dev_priv, pipe); 2146 } 2147 2148 /* check event from PCH */ 2149 if (de_iir & DE_PCH_EVENT) { 2150 u32 pch_iir = I915_READ(SDEIIR); 2151 2152 if (HAS_PCH_CPT(dev_priv)) 2153 cpt_irq_handler(dev_priv, pch_iir); 2154 else 2155 ibx_irq_handler(dev_priv, pch_iir); 2156 2157 /* should clear PCH hotplug event before clear CPU irq */ 2158 I915_WRITE(SDEIIR, pch_iir); 2159 } 2160 2161 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2162 ironlake_rps_change_irq_handler(dev_priv); 2163 } 2164 2165 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2166 u32 de_iir) 2167 { 2168 enum pipe pipe; 2169 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2170 2171 if (hotplug_trigger) 2172 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2173 2174 if (de_iir & DE_ERR_INT_IVB) 2175 ivb_err_int_handler(dev_priv); 2176 2177 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2178 dp_aux_irq_handler(dev_priv); 2179 2180 if (de_iir & DE_GSE_IVB) 2181 intel_opregion_asle_intr(dev_priv); 2182 2183 for_each_pipe(dev_priv, pipe) { 2184 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2185 intel_pipe_handle_vblank(dev_priv, pipe)) 2186 intel_check_page_flip(dev_priv, pipe); 2187 2188 /* plane/pipes map 1:1 on ilk+ */ 2189 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 2190 intel_finish_page_flip_cs(dev_priv, pipe); 2191 } 2192 2193 /* check event from PCH */ 2194 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2195 u32 pch_iir = I915_READ(SDEIIR); 2196 2197 cpt_irq_handler(dev_priv, pch_iir); 2198 2199 /* clear PCH hotplug event before clear CPU irq */ 2200 I915_WRITE(SDEIIR, pch_iir); 2201 } 2202 } 2203 2204 /* 2205 * To handle irqs with the minimum potential races with fresh interrupts, we: 2206 * 1 - Disable Master Interrupt Control. 2207 * 2 - Find the source(s) of the interrupt. 2208 * 3 - Clear the Interrupt Identity bits (IIR). 2209 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2210 * 5 - Re-enable Master Interrupt Control. 2211 */ 2212 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2213 { 2214 struct drm_device *dev = arg; 2215 struct drm_i915_private *dev_priv = to_i915(dev); 2216 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2217 irqreturn_t ret = IRQ_NONE; 2218 2219 if (!intel_irqs_enabled(dev_priv)) 2220 return IRQ_NONE; 2221 2222 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2223 disable_rpm_wakeref_asserts(dev_priv); 2224 2225 /* disable master interrupt before clearing iir */ 2226 de_ier = I915_READ(DEIER); 2227 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2228 POSTING_READ(DEIER); 2229 2230 /* Disable south interrupts. 
We'll only write to SDEIIR once, so further 2231 * interrupts will be stored on its back queue, and then we'll be 2232 * able to process them after we restore SDEIER (as soon as we restore 2233 * it, we'll get an interrupt if SDEIIR still has something to process 2234 * due to its back queue). */ 2235 if (!HAS_PCH_NOP(dev_priv)) { 2236 sde_ier = I915_READ(SDEIER); 2237 I915_WRITE(SDEIER, 0); 2238 POSTING_READ(SDEIER); 2239 } 2240 2241 /* Find, clear, then process each source of interrupt */ 2242 2243 gt_iir = I915_READ(GTIIR); 2244 if (gt_iir) { 2245 I915_WRITE(GTIIR, gt_iir); 2246 ret = IRQ_HANDLED; 2247 if (INTEL_GEN(dev_priv) >= 6) 2248 snb_gt_irq_handler(dev_priv, gt_iir); 2249 else 2250 ilk_gt_irq_handler(dev_priv, gt_iir); 2251 } 2252 2253 de_iir = I915_READ(DEIIR); 2254 if (de_iir) { 2255 I915_WRITE(DEIIR, de_iir); 2256 ret = IRQ_HANDLED; 2257 if (INTEL_GEN(dev_priv) >= 7) 2258 ivb_display_irq_handler(dev_priv, de_iir); 2259 else 2260 ilk_display_irq_handler(dev_priv, de_iir); 2261 } 2262 2263 if (INTEL_GEN(dev_priv) >= 6) { 2264 u32 pm_iir = I915_READ(GEN6_PMIIR); 2265 if (pm_iir) { 2266 I915_WRITE(GEN6_PMIIR, pm_iir); 2267 ret = IRQ_HANDLED; 2268 gen6_rps_irq_handler(dev_priv, pm_iir); 2269 } 2270 } 2271 2272 I915_WRITE(DEIER, de_ier); 2273 POSTING_READ(DEIER); 2274 if (!HAS_PCH_NOP(dev_priv)) { 2275 I915_WRITE(SDEIER, sde_ier); 2276 POSTING_READ(SDEIER); 2277 } 2278 2279 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2280 enable_rpm_wakeref_asserts(dev_priv); 2281 2282 return ret; 2283 } 2284 2285 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2286 u32 hotplug_trigger, 2287 const u32 hpd[HPD_NUM_PINS]) 2288 { 2289 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2290 2291 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2292 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2293 2294 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2295 dig_hotplug_reg, hpd, 2296 bxt_port_hotplug_long_detect); 2297 2298 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2299 } 2300 2301 static irqreturn_t 2302 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2303 { 2304 irqreturn_t ret = IRQ_NONE; 2305 u32 iir; 2306 enum pipe pipe; 2307 2308 if (master_ctl & GEN8_DE_MISC_IRQ) { 2309 iir = I915_READ(GEN8_DE_MISC_IIR); 2310 if (iir) { 2311 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2312 ret = IRQ_HANDLED; 2313 if (iir & GEN8_DE_MISC_GSE) 2314 intel_opregion_asle_intr(dev_priv); 2315 else 2316 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2317 } 2318 else 2319 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2320 } 2321 2322 if (master_ctl & GEN8_DE_PORT_IRQ) { 2323 iir = I915_READ(GEN8_DE_PORT_IIR); 2324 if (iir) { 2325 u32 tmp_mask; 2326 bool found = false; 2327 2328 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2329 ret = IRQ_HANDLED; 2330 2331 tmp_mask = GEN8_AUX_CHANNEL_A; 2332 if (INTEL_INFO(dev_priv)->gen >= 9) 2333 tmp_mask |= GEN9_AUX_CHANNEL_B | 2334 GEN9_AUX_CHANNEL_C | 2335 GEN9_AUX_CHANNEL_D; 2336 2337 if (iir & tmp_mask) { 2338 dp_aux_irq_handler(dev_priv); 2339 found = true; 2340 } 2341 2342 if (IS_BROXTON(dev_priv)) { 2343 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2344 if (tmp_mask) { 2345 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2346 hpd_bxt); 2347 found = true; 2348 } 2349 } else if (IS_BROADWELL(dev_priv)) { 2350 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2351 if (tmp_mask) { 2352 ilk_hpd_irq_handler(dev_priv, 2353 tmp_mask, hpd_bdw); 2354 found = true; 2355 } 2356 } 2357 2358 if (IS_BROXTON(dev_priv) && (iir
& BXT_DE_PORT_GMBUS)) { 2359 gmbus_irq_handler(dev_priv); 2360 found = true; 2361 } 2362 2363 if (!found) 2364 DRM_ERROR("Unexpected DE Port interrupt\n"); 2365 } 2366 else 2367 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2368 } 2369 2370 for_each_pipe(dev_priv, pipe) { 2371 u32 flip_done, fault_errors; 2372 2373 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2374 continue; 2375 2376 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2377 if (!iir) { 2378 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2379 continue; 2380 } 2381 2382 ret = IRQ_HANDLED; 2383 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2384 2385 if (iir & GEN8_PIPE_VBLANK && 2386 intel_pipe_handle_vblank(dev_priv, pipe)) 2387 intel_check_page_flip(dev_priv, pipe); 2388 2389 flip_done = iir; 2390 if (INTEL_INFO(dev_priv)->gen >= 9) 2391 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; 2392 else 2393 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2394 2395 if (flip_done) 2396 intel_finish_page_flip_cs(dev_priv, pipe); 2397 2398 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2399 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2400 2401 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2402 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2403 2404 fault_errors = iir; 2405 if (INTEL_INFO(dev_priv)->gen >= 9) 2406 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2407 else 2408 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2409 2410 if (fault_errors) 2411 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2412 pipe_name(pipe), 2413 fault_errors); 2414 } 2415 2416 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2417 master_ctl & GEN8_DE_PCH_IRQ) { 2418 /* 2419 * FIXME(BDW): Assume for now that the new interrupt handling 2420 * scheme also closed the SDE interrupt handling race we've seen 2421 * on older pch-split platforms. But this needs testing. 2422 */ 2423 iir = I915_READ(SDEIIR); 2424 if (iir) { 2425 I915_WRITE(SDEIIR, iir); 2426 ret = IRQ_HANDLED; 2427 2428 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 2429 spt_irq_handler(dev_priv, iir); 2430 else 2431 cpt_irq_handler(dev_priv, iir); 2432 } else { 2433 /* 2434 * Like on previous PCH there seems to be something 2435 * fishy going on with forwarding PCH interrupts.
2436 */ 2437 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2438 } 2439 } 2440 2441 return ret; 2442 } 2443 2444 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2445 { 2446 struct drm_device *dev = arg; 2447 struct drm_i915_private *dev_priv = to_i915(dev); 2448 u32 master_ctl; 2449 u32 gt_iir[4] = {}; 2450 irqreturn_t ret; 2451 2452 if (!intel_irqs_enabled(dev_priv)) 2453 return IRQ_NONE; 2454 2455 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2456 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2457 if (!master_ctl) 2458 return IRQ_NONE; 2459 2460 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2461 2462 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2463 disable_rpm_wakeref_asserts(dev_priv); 2464 2465 /* Find, clear, then process each source of interrupt */ 2466 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2467 gen8_gt_irq_handler(dev_priv, gt_iir); 2468 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2469 2470 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2471 POSTING_READ_FW(GEN8_MASTER_IRQ); 2472 2473 enable_rpm_wakeref_asserts(dev_priv); 2474 2475 return ret; 2476 } 2477 2478 static void i915_error_wake_up(struct drm_i915_private *dev_priv) 2479 { 2480 /* 2481 * Notify all waiters for GPU completion events that reset state has 2482 * been changed, and that they need to restart their wait after 2483 * checking for potential errors (and bail out to drop locks if there is 2484 * a gpu reset pending so that i915_error_work_func can acquire them). 2485 */ 2486 2487 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2488 wake_up_all(&dev_priv->gpu_error.wait_queue); 2489 2490 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2491 wake_up_all(&dev_priv->pending_flip_queue); 2492 } 2493 2494 /** 2495 * i915_reset_and_wakeup - do process context error handling work 2496 * @dev_priv: i915 device private 2497 * 2498 * Fire an error uevent so userspace can see that a hang or error 2499 * was detected. 2500 */ 2501 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) 2502 { 2503 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2504 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2505 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2506 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2507 2508 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2509 2510 DRM_DEBUG_DRIVER("resetting chip\n"); 2511 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2512 2513 /* 2514 * In most cases it's guaranteed that we get here with an RPM 2515 * reference held, for example because there is a pending GPU 2516 * request that won't finish until the reset is done. This 2517 * isn't the case at least when we get here by doing a 2518 * simulated reset via debugfs, so get an RPM reference. 2519 */ 2520 intel_runtime_pm_get(dev_priv); 2521 intel_prepare_reset(dev_priv); 2522 2523 do { 2524 /* 2525 * All state reset _must_ be completed before we update the 2526 * reset counter, for otherwise waiters might miss the reset 2527 * pending state and not properly drop locks, resulting in 2528 * deadlocks with the reset work.
2529 */ 2530 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 2531 i915_reset(dev_priv); 2532 mutex_unlock(&dev_priv->drm.struct_mutex); 2533 } 2534 2535 /* We need to wait for anyone holding the lock to wakeup */ 2536 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, 2537 I915_RESET_IN_PROGRESS, 2538 TASK_UNINTERRUPTIBLE, 2539 HZ)); 2540 2541 intel_finish_reset(dev_priv); 2542 intel_runtime_pm_put(dev_priv); 2543 2544 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 2545 kobject_uevent_env(kobj, 2546 KOBJ_CHANGE, reset_done_event); 2547 2548 /* 2549 * Note: The wake_up also serves as a memory barrier so that 2550 * waiters see the updated value of the dev_priv->gpu_error. 2551 */ 2552 wake_up_all(&dev_priv->gpu_error.reset_queue); 2553 } 2554 2555 static inline void 2556 i915_err_print_instdone(struct drm_i915_private *dev_priv, 2557 struct intel_instdone *instdone) 2558 { 2559 int slice; 2560 int subslice; 2561 2562 pr_err(" INSTDONE: 0x%08x\n", instdone->instdone); 2563 2564 if (INTEL_GEN(dev_priv) <= 3) 2565 return; 2566 2567 pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common); 2568 2569 if (INTEL_GEN(dev_priv) <= 6) 2570 return; 2571 2572 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2573 pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 2574 slice, subslice, instdone->sampler[slice][subslice]); 2575 2576 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2577 pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n", 2578 slice, subslice, instdone->row[slice][subslice]); 2579 } 2580 2581 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 2582 { 2583 u32 eir; 2584 2585 if (!IS_GEN2(dev_priv)) 2586 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 2587 2588 if (INTEL_GEN(dev_priv) < 4) 2589 I915_WRITE(IPEIR, I915_READ(IPEIR)); 2590 else 2591 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 2592 2593 I915_WRITE(EIR, I915_READ(EIR)); 2594 eir = I915_READ(EIR); 2595 if (eir) { 2596 /* 2597 * some errors might have become stuck, 2598 * mask them. 2599 */ 2600 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 2601 I915_WRITE(EMR, I915_READ(EMR) | eir); 2602 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2603 } 2604 } 2605 2606 /** 2607 * i915_handle_error - handle a gpu error 2608 * @dev_priv: i915 device private 2609 * @engine_mask: mask representing engines that are hung 2610 * Do some basic checking of register state at error time and 2611 * dump it to the syslog. Also call i915_capture_error_state() to make 2612 * sure we get a record and make it available in debugfs. Fire a uevent 2613 * so userspace knows something bad happened (should trigger collection 2614 * of a ring dump etc.). 2615 * @fmt: Error message format string 2616 */ 2617 void i915_handle_error(struct drm_i915_private *dev_priv, 2618 u32 engine_mask, 2619 const char *fmt, ...) 2620 { 2621 va_list args; 2622 char error_msg[80]; 2623 2624 va_start(args, fmt); 2625 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2626 va_end(args); 2627 2628 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2629 i915_clear_error_registers(dev_priv); 2630 2631 if (!engine_mask) 2632 return; 2633 2634 if (test_and_set_bit(I915_RESET_IN_PROGRESS, 2635 &dev_priv->gpu_error.flags)) 2636 return; 2637 2638 /* 2639 * Wakeup waiting processes so that the reset function 2640 * i915_reset_and_wakeup doesn't deadlock trying to grab 2641 * various locks. 
By bumping the reset counter first, the woken 2642 * processes will see a reset in progress and back off, 2643 * releasing their locks and then wait for the reset completion. 2644 * We must do this for _all_ gpu waiters that might hold locks 2645 * that the reset work needs to acquire. 2646 * 2647 * Note: The wake_up also provides a memory barrier to ensure that the 2648 * waiters see the updated value of the reset flags. 2649 */ 2650 i915_error_wake_up(dev_priv); 2651 2652 i915_reset_and_wakeup(dev_priv); 2653 } 2654 2655 /* Called from drm generic code, passed 'crtc' which 2656 * we use as a pipe index 2657 */ 2658 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 2659 { 2660 struct drm_i915_private *dev_priv = to_i915(dev); 2661 unsigned long irqflags; 2662 2663 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2664 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2665 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2666 2667 return 0; 2668 } 2669 2670 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 2671 { 2672 struct drm_i915_private *dev_priv = to_i915(dev); 2673 unsigned long irqflags; 2674 2675 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2676 i915_enable_pipestat(dev_priv, pipe, 2677 PIPE_START_VBLANK_INTERRUPT_STATUS); 2678 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2679 2680 return 0; 2681 } 2682 2683 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2684 { 2685 struct drm_i915_private *dev_priv = to_i915(dev); 2686 unsigned long irqflags; 2687 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 2688 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2689 2690 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2691 ilk_enable_display_irq(dev_priv, bit); 2692 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2693 2694 return 0; 2695 } 2696 2697 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2698 { 2699 struct drm_i915_private *dev_priv = to_i915(dev); 2700 unsigned long irqflags; 2701 2702 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2703 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2704 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2705 2706 return 0; 2707 } 2708 2709 /* Called from drm generic code, passed 'crtc' which 2710 * we use as a pipe index 2711 */ 2712 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 2713 { 2714 struct drm_i915_private *dev_priv = to_i915(dev); 2715 unsigned long irqflags; 2716 2717 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2718 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2719 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2720 } 2721 2722 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 2723 { 2724 struct drm_i915_private *dev_priv = to_i915(dev); 2725 unsigned long irqflags; 2726 2727 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2728 i915_disable_pipestat(dev_priv, pipe, 2729 PIPE_START_VBLANK_INTERRUPT_STATUS); 2730 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2731 } 2732 2733 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2734 { 2735 struct drm_i915_private *dev_priv = to_i915(dev); 2736 unsigned long irqflags; 2737 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
2738 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2739 2740 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2741 ilk_disable_display_irq(dev_priv, bit); 2742 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2743 } 2744 2745 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2746 { 2747 struct drm_i915_private *dev_priv = to_i915(dev); 2748 unsigned long irqflags; 2749 2750 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2751 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2752 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2753 } 2754 2755 static bool 2756 ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr) 2757 { 2758 if (INTEL_GEN(engine->i915) >= 8) { 2759 return (ipehr >> 23) == 0x1c; 2760 } else { 2761 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2762 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2763 MI_SEMAPHORE_REGISTER); 2764 } 2765 } 2766 2767 static struct intel_engine_cs * 2768 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, 2769 u64 offset) 2770 { 2771 struct drm_i915_private *dev_priv = engine->i915; 2772 struct intel_engine_cs *signaller; 2773 enum intel_engine_id id; 2774 2775 if (INTEL_GEN(dev_priv) >= 8) { 2776 for_each_engine(signaller, dev_priv, id) { 2777 if (engine == signaller) 2778 continue; 2779 2780 if (offset == signaller->semaphore.signal_ggtt[engine->hw_id]) 2781 return signaller; 2782 } 2783 } else { 2784 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2785 2786 for_each_engine(signaller, dev_priv, id) { 2787 if(engine == signaller) 2788 continue; 2789 2790 if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id]) 2791 return signaller; 2792 } 2793 } 2794 2795 DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n", 2796 engine->name, ipehr, offset); 2797 2798 return ERR_PTR(-ENODEV); 2799 } 2800 2801 static struct intel_engine_cs * 2802 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) 2803 { 2804 struct drm_i915_private *dev_priv = engine->i915; 2805 void __iomem *vaddr; 2806 u32 cmd, ipehr, head; 2807 u64 offset = 0; 2808 int i, backwards; 2809 2810 /* 2811 * This function does not support execlist mode - any attempt to 2812 * proceed further into this function will result in a kernel panic 2813 * when dereferencing ring->buffer, which is not set up in execlist 2814 * mode. 2815 * 2816 * The correct way of doing it would be to derive the currently 2817 * executing ring buffer from the current context, which is derived 2818 * from the currently running request. Unfortunately, to get the 2819 * current request we would have to grab the struct_mutex before doing 2820 * anything else, which would be ill-advised since some other thread 2821 * might have grabbed it already and managed to hang itself, causing 2822 * the hang checker to deadlock. 2823 * 2824 * Therefore, this function does not support execlist mode in its 2825 * current form. Just return NULL and move on. 2826 */ 2827 if (engine->buffer == NULL) 2828 return NULL; 2829 2830 ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 2831 if (!ipehr_is_semaphore_wait(engine, ipehr)) 2832 return NULL; 2833 2834 /* 2835 * HEAD is likely pointing to the dword after the actual command, 2836 * so scan backwards until we find the MBOX. But limit it to just 3 2837 * or 4 dwords depending on the semaphore wait command size. 
2838 * Note that we don't care about ACTHD here since that might 2839 * point at the batch, and semaphores are always emitted into the 2840 * ringbuffer itself. 2841 */ 2842 head = I915_READ_HEAD(engine) & HEAD_ADDR; 2843 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4; 2844 vaddr = (void __iomem *)engine->buffer->vaddr; 2845 2846 for (i = backwards; i; --i) { 2847 /* 2848 * Be paranoid and presume the hw has gone off into the wild - 2849 * our ring is smaller than what the hardware (and hence 2850 * HEAD_ADDR) allows. Also handles wrap-around. 2851 */ 2852 head &= engine->buffer->size - 1; 2853 2854 /* This here seems to blow up */ 2855 cmd = ioread32(vaddr + head); 2856 if (cmd == ipehr) 2857 break; 2858 2859 head -= 4; 2860 } 2861 2862 if (!i) 2863 return NULL; 2864 2865 *seqno = ioread32(vaddr + head + 4) + 1; 2866 if (INTEL_GEN(dev_priv) >= 8) { 2867 offset = ioread32(vaddr + head + 12); 2868 offset <<= 32; 2869 offset |= ioread32(vaddr + head + 8); 2870 } 2871 return semaphore_wait_to_signaller_ring(engine, ipehr, offset); 2872 } 2873 2874 static int semaphore_passed(struct intel_engine_cs *engine) 2875 { 2876 struct drm_i915_private *dev_priv = engine->i915; 2877 struct intel_engine_cs *signaller; 2878 u32 seqno; 2879 2880 engine->hangcheck.deadlock++; 2881 2882 signaller = semaphore_waits_for(engine, &seqno); 2883 if (signaller == NULL) 2884 return -1; 2885 2886 if (IS_ERR(signaller)) 2887 return 0; 2888 2889 /* Prevent pathological recursion due to driver bugs */ 2890 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) 2891 return -1; 2892 2893 if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno)) 2894 return 1; 2895 2896 /* cursory check for an unkickable deadlock */ 2897 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 2898 semaphore_passed(signaller) < 0) 2899 return -1; 2900 2901 return 0; 2902 } 2903 2904 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2905 { 2906 struct intel_engine_cs *engine; 2907 enum intel_engine_id id; 2908 2909 for_each_engine(engine, dev_priv, id) 2910 engine->hangcheck.deadlock = 0; 2911 } 2912 2913 static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) 2914 { 2915 u32 tmp = current_instdone | *old_instdone; 2916 bool unchanged; 2917 2918 unchanged = tmp == *old_instdone; 2919 *old_instdone |= tmp; 2920 2921 return unchanged; 2922 } 2923 2924 static bool subunits_stuck(struct intel_engine_cs *engine) 2925 { 2926 struct drm_i915_private *dev_priv = engine->i915; 2927 struct intel_instdone instdone; 2928 struct intel_instdone *accu_instdone = &engine->hangcheck.instdone; 2929 bool stuck; 2930 int slice; 2931 int subslice; 2932 2933 if (engine->id != RCS) 2934 return true; 2935 2936 intel_engine_get_instdone(engine, &instdone); 2937 2938 /* There might be unstable subunit states even when 2939 * actual head is not moving. Filter out the unstable ones by 2940 * accumulating the undone -> done transitions and only 2941 * consider those as progress.
2942 */ 2943 stuck = instdone_unchanged(instdone.instdone, 2944 &accu_instdone->instdone); 2945 stuck &= instdone_unchanged(instdone.slice_common, 2946 &accu_instdone->slice_common); 2947 2948 for_each_instdone_slice_subslice(dev_priv, slice, subslice) { 2949 stuck &= instdone_unchanged(instdone.sampler[slice][subslice], 2950 &accu_instdone->sampler[slice][subslice]); 2951 stuck &= instdone_unchanged(instdone.row[slice][subslice], 2952 &accu_instdone->row[slice][subslice]); 2953 } 2954 2955 return stuck; 2956 } 2957 2958 static enum intel_engine_hangcheck_action 2959 head_stuck(struct intel_engine_cs *engine, u64 acthd) 2960 { 2961 if (acthd != engine->hangcheck.acthd) { 2962 2963 /* Clear subunit states on head movement */ 2964 memset(&engine->hangcheck.instdone, 0, 2965 sizeof(engine->hangcheck.instdone)); 2966 2967 return HANGCHECK_ACTIVE; 2968 } 2969 2970 if (!subunits_stuck(engine)) 2971 return HANGCHECK_ACTIVE; 2972 2973 return HANGCHECK_HUNG; 2974 } 2975 2976 static enum intel_engine_hangcheck_action 2977 engine_stuck(struct intel_engine_cs *engine, u64 acthd) 2978 { 2979 struct drm_i915_private *dev_priv = engine->i915; 2980 enum intel_engine_hangcheck_action ha; 2981 u32 tmp; 2982 2983 ha = head_stuck(engine, acthd); 2984 if (ha != HANGCHECK_HUNG) 2985 return ha; 2986 2987 if (IS_GEN2(dev_priv)) 2988 return HANGCHECK_HUNG; 2989 2990 /* Is the chip hanging on a WAIT_FOR_EVENT? 2991 * If so we can simply poke the RB_WAIT bit 2992 * and break the hang. This should work on 2993 * all but the second generation chipsets. 2994 */ 2995 tmp = I915_READ_CTL(engine); 2996 if (tmp & RING_WAIT) { 2997 i915_handle_error(dev_priv, 0, 2998 "Kicking stuck wait on %s", 2999 engine->name); 3000 I915_WRITE_CTL(engine, tmp); 3001 return HANGCHECK_KICK; 3002 } 3003 3004 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3005 switch (semaphore_passed(engine)) { 3006 default: 3007 return HANGCHECK_HUNG; 3008 case 1: 3009 i915_handle_error(dev_priv, 0, 3010 "Kicking stuck semaphore on %s", 3011 engine->name); 3012 I915_WRITE_CTL(engine, tmp); 3013 return HANGCHECK_KICK; 3014 case 0: 3015 return HANGCHECK_WAIT; 3016 } 3017 } 3018 3019 return HANGCHECK_HUNG; 3020 } 3021 3022 /* 3023 * This is called when the chip hasn't reported back with completed 3024 * batchbuffers in a long time. We keep track of seqno progress per ring and, 3025 * if there is no progress, the hangcheck score for that ring is increased. 3026 * Further, acthd is inspected to see if the ring is stuck. If it is stuck, 3027 * we kick the ring. If we see no progress on three subsequent calls 3028 * we assume the chip is wedged and try to fix it by resetting the chip. 3029 */ 3030 static void i915_hangcheck_elapsed(struct work_struct *work) 3031 { 3032 struct drm_i915_private *dev_priv = 3033 container_of(work, typeof(*dev_priv), 3034 gpu_error.hangcheck_work.work); 3035 struct intel_engine_cs *engine; 3036 enum intel_engine_id id; 3037 unsigned int hung = 0, stuck = 0; 3038 int busy_count = 0; 3039 #define BUSY 1 3040 #define KICK 5 3041 #define HUNG 20 3042 #define ACTIVE_DECAY 15 3043 3044 if (!i915.enable_hangcheck) 3045 return; 3046 3047 if (!READ_ONCE(dev_priv->gt.awake)) 3048 return; 3049 3050 /* As enabling the GPU requires fairly extensive mmio access, 3051 * periodically arm the mmio checker to see if we are triggering 3052 * any invalid access.
3053 */ 3054 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 3055 3056 for_each_engine(engine, dev_priv, id) { 3057 bool busy = intel_engine_has_waiter(engine); 3058 u64 acthd; 3059 u32 seqno; 3060 u32 submit; 3061 3062 semaphore_clear_deadlocks(dev_priv); 3063 3064 /* We don't strictly need an irq-barrier here, as we are not 3065 * serving an interrupt request, but be paranoid in case the 3066 * barrier has side-effects (such as preventing a broken 3067 * cacheline snoop) and so be sure that we can see the seqno 3068 * advance. If the seqno should stick, due to a stale 3069 * cacheline, we would erroneously declare the GPU hung. 3070 */ 3071 if (engine->irq_seqno_barrier) 3072 engine->irq_seqno_barrier(engine); 3073 3074 acthd = intel_engine_get_active_head(engine); 3075 seqno = intel_engine_get_seqno(engine); 3076 submit = READ_ONCE(engine->last_submitted_seqno); 3077 3078 if (engine->hangcheck.seqno == seqno) { 3079 if (i915_seqno_passed(seqno, submit)) { 3080 engine->hangcheck.action = HANGCHECK_IDLE; 3081 } else { 3082 /* We always increment the hangcheck score 3083 * if the engine is busy and still processing 3084 * the same request, so that no single request 3085 * can run indefinitely (such as a chain of 3086 * batches). The only time we do not increment 3087 * the hangcheck score on this ring is if this 3088 * engine is in a legitimate wait for another 3089 * engine. In that case the waiting engine is a 3090 * victim and we want to be sure we catch the 3091 * right culprit. Then every time we do kick 3092 * the ring, add a small increment to the 3093 * score so that we can catch a batch that is 3094 * being repeatedly kicked and so responsible 3095 * for stalling the machine. 3096 */ 3097 engine->hangcheck.action = 3098 engine_stuck(engine, acthd); 3099 3100 switch (engine->hangcheck.action) { 3101 case HANGCHECK_IDLE: 3102 case HANGCHECK_WAIT: 3103 break; 3104 case HANGCHECK_ACTIVE: 3105 engine->hangcheck.score += BUSY; 3106 break; 3107 case HANGCHECK_KICK: 3108 engine->hangcheck.score += KICK; 3109 break; 3110 case HANGCHECK_HUNG: 3111 engine->hangcheck.score += HUNG; 3112 break; 3113 } 3114 } 3115 3116 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3117 hung |= intel_engine_flag(engine); 3118 if (engine->hangcheck.action != HANGCHECK_HUNG) 3119 stuck |= intel_engine_flag(engine); 3120 } 3121 } else { 3122 engine->hangcheck.action = HANGCHECK_ACTIVE; 3123 3124 /* Gradually reduce the count so that we catch DoS 3125 * attempts across multiple batches. 3126 */ 3127 if (engine->hangcheck.score > 0) 3128 engine->hangcheck.score -= ACTIVE_DECAY; 3129 if (engine->hangcheck.score < 0) 3130 engine->hangcheck.score = 0; 3131 3132 /* Clear head and subunit states on seqno movement */ 3133 acthd = 0; 3134 3135 memset(&engine->hangcheck.instdone, 0, 3136 sizeof(engine->hangcheck.instdone)); 3137 } 3138 3139 engine->hangcheck.seqno = seqno; 3140 engine->hangcheck.acthd = acthd; 3141 busy_count += busy; 3142 } 3143 3144 if (hung) { 3145 char msg[80]; 3146 unsigned int tmp; 3147 int len; 3148 3149 /* If some rings hung but others were still busy, only 3150 * blame the hanging rings in the synopsis. 3151 */ 3152 if (stuck != hung) 3153 hung &= ~stuck; 3154 len = scnprintf(msg, sizeof(msg), 3155 "%s on ", stuck == hung ?
"No progress" : "Hang"); 3156 for_each_engine_masked(engine, dev_priv, hung, tmp) 3157 len += scnprintf(msg + len, sizeof(msg) - len, 3158 "%s, ", engine->name); 3159 msg[len-2] = '\0'; 3160 3161 return i915_handle_error(dev_priv, hung, msg); 3162 } 3163 3164 /* Reset timer in case GPU hangs without another request being added */ 3165 if (busy_count) 3166 i915_queue_hangcheck(dev_priv); 3167 } 3168 3169 static void ibx_irq_reset(struct drm_device *dev) 3170 { 3171 struct drm_i915_private *dev_priv = to_i915(dev); 3172 3173 if (HAS_PCH_NOP(dev_priv)) 3174 return; 3175 3176 GEN5_IRQ_RESET(SDE); 3177 3178 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3179 I915_WRITE(SERR_INT, 0xffffffff); 3180 } 3181 3182 /* 3183 * SDEIER is also touched by the interrupt handler to work around missed PCH 3184 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3185 * instead we unconditionally enable all PCH interrupt sources here, but then 3186 * only unmask them as needed with SDEIMR. 3187 * 3188 * This function needs to be called before interrupts are enabled. 3189 */ 3190 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3191 { 3192 struct drm_i915_private *dev_priv = to_i915(dev); 3193 3194 if (HAS_PCH_NOP(dev_priv)) 3195 return; 3196 3197 WARN_ON(I915_READ(SDEIER) != 0); 3198 I915_WRITE(SDEIER, 0xffffffff); 3199 POSTING_READ(SDEIER); 3200 } 3201 3202 static void gen5_gt_irq_reset(struct drm_device *dev) 3203 { 3204 struct drm_i915_private *dev_priv = to_i915(dev); 3205 3206 GEN5_IRQ_RESET(GT); 3207 if (INTEL_INFO(dev)->gen >= 6) 3208 GEN5_IRQ_RESET(GEN6_PM); 3209 } 3210 3211 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3212 { 3213 enum pipe pipe; 3214 3215 if (IS_CHERRYVIEW(dev_priv)) 3216 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3217 else 3218 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3219 3220 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3221 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3222 3223 for_each_pipe(dev_priv, pipe) { 3224 I915_WRITE(PIPESTAT(pipe), 3225 PIPE_FIFO_UNDERRUN_STATUS | 3226 PIPESTAT_INT_STATUS_MASK); 3227 dev_priv->pipestat_irq_mask[pipe] = 0; 3228 } 3229 3230 GEN5_IRQ_RESET(VLV_); 3231 dev_priv->irq_mask = ~0; 3232 } 3233 3234 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3235 { 3236 u32 pipestat_mask; 3237 u32 enable_mask; 3238 enum pipe pipe; 3239 3240 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3241 PIPE_CRC_DONE_INTERRUPT_STATUS; 3242 3243 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3244 for_each_pipe(dev_priv, pipe) 3245 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3246 3247 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3248 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3249 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3250 if (IS_CHERRYVIEW(dev_priv)) 3251 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3252 3253 WARN_ON(dev_priv->irq_mask != ~0); 3254 3255 dev_priv->irq_mask = ~enable_mask; 3256 3257 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3258 } 3259 3260 /* drm_dma.h hooks 3261 */ 3262 static void ironlake_irq_reset(struct drm_device *dev) 3263 { 3264 struct drm_i915_private *dev_priv = to_i915(dev); 3265 3266 I915_WRITE(HWSTAM, 0xffffffff); 3267 3268 GEN5_IRQ_RESET(DE); 3269 if (IS_GEN7(dev_priv)) 3270 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3271 3272 gen5_gt_irq_reset(dev); 3273 3274 ibx_irq_reset(dev); 3275 } 3276 3277 static void valleyview_irq_preinstall(struct drm_device *dev) 3278 { 3279 
struct drm_i915_private *dev_priv = to_i915(dev); 3280 3281 I915_WRITE(VLV_MASTER_IER, 0); 3282 POSTING_READ(VLV_MASTER_IER); 3283 3284 gen5_gt_irq_reset(dev); 3285 3286 spin_lock_irq(&dev_priv->irq_lock); 3287 if (dev_priv->display_irqs_enabled) 3288 vlv_display_irq_reset(dev_priv); 3289 spin_unlock_irq(&dev_priv->irq_lock); 3290 } 3291 3292 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3293 { 3294 GEN8_IRQ_RESET_NDX(GT, 0); 3295 GEN8_IRQ_RESET_NDX(GT, 1); 3296 GEN8_IRQ_RESET_NDX(GT, 2); 3297 GEN8_IRQ_RESET_NDX(GT, 3); 3298 } 3299 3300 static void gen8_irq_reset(struct drm_device *dev) 3301 { 3302 struct drm_i915_private *dev_priv = to_i915(dev); 3303 int pipe; 3304 3305 I915_WRITE(GEN8_MASTER_IRQ, 0); 3306 POSTING_READ(GEN8_MASTER_IRQ); 3307 3308 gen8_gt_irq_reset(dev_priv); 3309 3310 for_each_pipe(dev_priv, pipe) 3311 if (intel_display_power_is_enabled(dev_priv, 3312 POWER_DOMAIN_PIPE(pipe))) 3313 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3314 3315 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3316 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3317 GEN5_IRQ_RESET(GEN8_PCU_); 3318 3319 if (HAS_PCH_SPLIT(dev_priv)) 3320 ibx_irq_reset(dev); 3321 } 3322 3323 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3324 unsigned int pipe_mask) 3325 { 3326 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3327 enum pipe pipe; 3328 3329 spin_lock_irq(&dev_priv->irq_lock); 3330 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3331 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3332 dev_priv->de_irq_mask[pipe], 3333 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3334 spin_unlock_irq(&dev_priv->irq_lock); 3335 } 3336 3337 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3338 unsigned int pipe_mask) 3339 { 3340 enum pipe pipe; 3341 3342 spin_lock_irq(&dev_priv->irq_lock); 3343 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3344 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3345 spin_unlock_irq(&dev_priv->irq_lock); 3346 3347 /* make sure we're done processing display irqs */ 3348 synchronize_irq(dev_priv->drm.irq); 3349 } 3350 3351 static void cherryview_irq_preinstall(struct drm_device *dev) 3352 { 3353 struct drm_i915_private *dev_priv = to_i915(dev); 3354 3355 I915_WRITE(GEN8_MASTER_IRQ, 0); 3356 POSTING_READ(GEN8_MASTER_IRQ); 3357 3358 gen8_gt_irq_reset(dev_priv); 3359 3360 GEN5_IRQ_RESET(GEN8_PCU_); 3361 3362 spin_lock_irq(&dev_priv->irq_lock); 3363 if (dev_priv->display_irqs_enabled) 3364 vlv_display_irq_reset(dev_priv); 3365 spin_unlock_irq(&dev_priv->irq_lock); 3366 } 3367 3368 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3369 const u32 hpd[HPD_NUM_PINS]) 3370 { 3371 struct intel_encoder *encoder; 3372 u32 enabled_irqs = 0; 3373 3374 for_each_intel_encoder(&dev_priv->drm, encoder) 3375 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3376 enabled_irqs |= hpd[encoder->hpd_pin]; 3377 3378 return enabled_irqs; 3379 } 3380 3381 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3382 { 3383 u32 hotplug_irqs, hotplug, enabled_irqs; 3384 3385 if (HAS_PCH_IBX(dev_priv)) { 3386 hotplug_irqs = SDE_HOTPLUG_MASK; 3387 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3388 } else { 3389 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3390 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3391 } 3392 3393 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3394 3395 /* 3396 * Enable digital hotplug on the PCH, and configure the DP short pulse 3397 * duration to 2ms (which is the minimum in the Display Port spec). 
3398 * The pulse duration bits are reserved on LPT+. 3399 */ 3400 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3401 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3402 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3403 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3404 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3405 /* 3406 * When CPU and PCH are on the same package, port A 3407 * HPD must be enabled in both north and south. 3408 */ 3409 if (HAS_PCH_LPT_LP(dev_priv)) 3410 hotplug |= PORTA_HOTPLUG_ENABLE; 3411 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3412 } 3413 3414 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3415 { 3416 u32 hotplug_irqs, hotplug, enabled_irqs; 3417 3418 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3419 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3420 3421 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3422 3423 /* Enable digital hotplug on the PCH */ 3424 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3425 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | 3426 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; 3427 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3428 3429 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3430 hotplug |= PORTE_HOTPLUG_ENABLE; 3431 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3432 } 3433 3434 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3435 { 3436 u32 hotplug_irqs, hotplug, enabled_irqs; 3437 3438 if (INTEL_GEN(dev_priv) >= 8) { 3439 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3440 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3441 3442 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3443 } else if (INTEL_GEN(dev_priv) >= 7) { 3444 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3445 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3446 3447 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3448 } else { 3449 hotplug_irqs = DE_DP_A_HOTPLUG; 3450 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3451 3452 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3453 } 3454 3455 /* 3456 * Enable digital hotplug on the CPU, and configure the DP short pulse 3457 * duration to 2ms (which is the minimum in the Display Port spec) 3458 * The pulse duration bits are reserved on HSW+. 3459 */ 3460 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3461 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3462 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3463 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3464 3465 ibx_hpd_irq_setup(dev_priv); 3466 } 3467 3468 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3469 { 3470 u32 hotplug_irqs, hotplug, enabled_irqs; 3471 3472 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3473 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3474 3475 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3476 3477 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3478 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | 3479 PORTA_HOTPLUG_ENABLE; 3480 3481 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3482 hotplug, enabled_irqs); 3483 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3484 3485 /* 3486 * For BXT invert bit has to be set based on AOB design 3487 * for HPD detection logic, update it based on VBT fields. 
3488 */ 3489 3490 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3491 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3492 hotplug |= BXT_DDIA_HPD_INVERT; 3493 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3494 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3495 hotplug |= BXT_DDIB_HPD_INVERT; 3496 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3497 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3498 hotplug |= BXT_DDIC_HPD_INVERT; 3499 3500 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3501 } 3502 3503 static void ibx_irq_postinstall(struct drm_device *dev) 3504 { 3505 struct drm_i915_private *dev_priv = to_i915(dev); 3506 u32 mask; 3507 3508 if (HAS_PCH_NOP(dev_priv)) 3509 return; 3510 3511 if (HAS_PCH_IBX(dev_priv)) 3512 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3513 else 3514 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3515 3516 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3517 I915_WRITE(SDEIMR, ~mask); 3518 } 3519 3520 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3521 { 3522 struct drm_i915_private *dev_priv = to_i915(dev); 3523 u32 pm_irqs, gt_irqs; 3524 3525 pm_irqs = gt_irqs = 0; 3526 3527 dev_priv->gt_irq_mask = ~0; 3528 if (HAS_L3_DPF(dev_priv)) { 3529 /* L3 parity interrupt is always unmasked. */ 3530 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3531 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3532 } 3533 3534 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3535 if (IS_GEN5(dev_priv)) { 3536 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3537 } else { 3538 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3539 } 3540 3541 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3542 3543 if (INTEL_INFO(dev)->gen >= 6) { 3544 /* 3545 * RPS interrupts will get enabled/disabled on demand when RPS 3546 * itself is enabled/disabled. 3547 */ 3548 if (HAS_VEBOX(dev)) 3549 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3550 3551 dev_priv->pm_irq_mask = 0xffffffff; 3552 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3553 } 3554 } 3555 3556 static int ironlake_irq_postinstall(struct drm_device *dev) 3557 { 3558 struct drm_i915_private *dev_priv = to_i915(dev); 3559 u32 display_mask, extra_mask; 3560 3561 if (INTEL_INFO(dev)->gen >= 7) { 3562 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3563 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3564 DE_PLANEB_FLIP_DONE_IVB | 3565 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3566 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3567 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3568 DE_DP_A_HOTPLUG_IVB); 3569 } else { 3570 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3571 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3572 DE_AUX_CHANNEL_A | 3573 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3574 DE_POISON); 3575 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3576 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3577 DE_DP_A_HOTPLUG); 3578 } 3579 3580 dev_priv->irq_mask = ~display_mask; 3581 3582 I915_WRITE(HWSTAM, 0xeffe); 3583 3584 ibx_irq_pre_postinstall(dev); 3585 3586 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3587 3588 gen5_gt_irq_postinstall(dev); 3589 3590 ibx_irq_postinstall(dev); 3591 3592 if (IS_IRONLAKE_M(dev_priv)) { 3593 /* Enable PCU event interrupts 3594 * 3595 * spinlocking not required here for correctness since interrupt 3596 * setup is guaranteed to run in single-threaded context. But we 3597 * need it to make the assert_spin_locked happy. 
*/ 3598 spin_lock_irq(&dev_priv->irq_lock); 3599 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3600 spin_unlock_irq(&dev_priv->irq_lock); 3601 } 3602 3603 return 0; 3604 } 3605 3606 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3607 { 3608 assert_spin_locked(&dev_priv->irq_lock); 3609 3610 if (dev_priv->display_irqs_enabled) 3611 return; 3612 3613 dev_priv->display_irqs_enabled = true; 3614 3615 if (intel_irqs_enabled(dev_priv)) { 3616 vlv_display_irq_reset(dev_priv); 3617 vlv_display_irq_postinstall(dev_priv); 3618 } 3619 } 3620 3621 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3622 { 3623 assert_spin_locked(&dev_priv->irq_lock); 3624 3625 if (!dev_priv->display_irqs_enabled) 3626 return; 3627 3628 dev_priv->display_irqs_enabled = false; 3629 3630 if (intel_irqs_enabled(dev_priv)) 3631 vlv_display_irq_reset(dev_priv); 3632 } 3633 3634 3635 static int valleyview_irq_postinstall(struct drm_device *dev) 3636 { 3637 struct drm_i915_private *dev_priv = to_i915(dev); 3638 3639 gen5_gt_irq_postinstall(dev); 3640 3641 spin_lock_irq(&dev_priv->irq_lock); 3642 if (dev_priv->display_irqs_enabled) 3643 vlv_display_irq_postinstall(dev_priv); 3644 spin_unlock_irq(&dev_priv->irq_lock); 3645 3646 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3647 POSTING_READ(VLV_MASTER_IER); 3648 3649 return 0; 3650 } 3651 3652 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3653 { 3654 /* These are interrupts we'll toggle with the ring mask register */ 3655 uint32_t gt_interrupts[] = { 3656 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3657 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3658 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3659 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3660 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3661 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3662 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3663 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3664 0, 3665 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3666 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3667 }; 3668 3669 if (HAS_L3_DPF(dev_priv)) 3670 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3671 3672 dev_priv->pm_irq_mask = 0xffffffff; 3673 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3674 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3675 /* 3676 * RPS interrupts will get enabled/disabled on demand when RPS itself 3677 * is enabled/disabled. 
3678 */ 3679 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3680 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3681 } 3682 3683 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3684 { 3685 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3686 uint32_t de_pipe_enables; 3687 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3688 u32 de_port_enables; 3689 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3690 enum pipe pipe; 3691 3692 if (INTEL_INFO(dev_priv)->gen >= 9) { 3693 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3694 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3695 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3696 GEN9_AUX_CHANNEL_D; 3697 if (IS_BROXTON(dev_priv)) 3698 de_port_masked |= BXT_DE_PORT_GMBUS; 3699 } else { 3700 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3701 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3702 } 3703 3704 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3705 GEN8_PIPE_FIFO_UNDERRUN; 3706 3707 de_port_enables = de_port_masked; 3708 if (IS_BROXTON(dev_priv)) 3709 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3710 else if (IS_BROADWELL(dev_priv)) 3711 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3712 3713 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3714 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3715 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3716 3717 for_each_pipe(dev_priv, pipe) 3718 if (intel_display_power_is_enabled(dev_priv, 3719 POWER_DOMAIN_PIPE(pipe))) 3720 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3721 dev_priv->de_irq_mask[pipe], 3722 de_pipe_enables); 3723 3724 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3725 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3726 } 3727 3728 static int gen8_irq_postinstall(struct drm_device *dev) 3729 { 3730 struct drm_i915_private *dev_priv = to_i915(dev); 3731 3732 if (HAS_PCH_SPLIT(dev_priv)) 3733 ibx_irq_pre_postinstall(dev); 3734 3735 gen8_gt_irq_postinstall(dev_priv); 3736 gen8_de_irq_postinstall(dev_priv); 3737 3738 if (HAS_PCH_SPLIT(dev_priv)) 3739 ibx_irq_postinstall(dev); 3740 3741 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3742 POSTING_READ(GEN8_MASTER_IRQ); 3743 3744 return 0; 3745 } 3746 3747 static int cherryview_irq_postinstall(struct drm_device *dev) 3748 { 3749 struct drm_i915_private *dev_priv = to_i915(dev); 3750 3751 gen8_gt_irq_postinstall(dev_priv); 3752 3753 spin_lock_irq(&dev_priv->irq_lock); 3754 if (dev_priv->display_irqs_enabled) 3755 vlv_display_irq_postinstall(dev_priv); 3756 spin_unlock_irq(&dev_priv->irq_lock); 3757 3758 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3759 POSTING_READ(GEN8_MASTER_IRQ); 3760 3761 return 0; 3762 } 3763 3764 static void gen8_irq_uninstall(struct drm_device *dev) 3765 { 3766 struct drm_i915_private *dev_priv = to_i915(dev); 3767 3768 if (!dev_priv) 3769 return; 3770 3771 gen8_irq_reset(dev); 3772 } 3773 3774 static void valleyview_irq_uninstall(struct drm_device *dev) 3775 { 3776 struct drm_i915_private *dev_priv = to_i915(dev); 3777 3778 if (!dev_priv) 3779 return; 3780 3781 I915_WRITE(VLV_MASTER_IER, 0); 3782 POSTING_READ(VLV_MASTER_IER); 3783 3784 gen5_gt_irq_reset(dev); 3785 3786 I915_WRITE(HWSTAM, 0xffffffff); 3787 3788 spin_lock_irq(&dev_priv->irq_lock); 3789 if (dev_priv->display_irqs_enabled) 3790 vlv_display_irq_reset(dev_priv); 3791 spin_unlock_irq(&dev_priv->irq_lock); 3792 } 3793 3794 static void cherryview_irq_uninstall(struct drm_device *dev) 3795 { 3796 struct drm_i915_private *dev_priv = to_i915(dev); 3797 3798 if (!dev_priv) 3799 return; 3800 3801 
I915_WRITE(GEN8_MASTER_IRQ, 0); 3802 POSTING_READ(GEN8_MASTER_IRQ); 3803 3804 gen8_gt_irq_reset(dev_priv); 3805 3806 GEN5_IRQ_RESET(GEN8_PCU_); 3807 3808 spin_lock_irq(&dev_priv->irq_lock); 3809 if (dev_priv->display_irqs_enabled) 3810 vlv_display_irq_reset(dev_priv); 3811 spin_unlock_irq(&dev_priv->irq_lock); 3812 } 3813 3814 static void ironlake_irq_uninstall(struct drm_device *dev) 3815 { 3816 struct drm_i915_private *dev_priv = to_i915(dev); 3817 3818 if (!dev_priv) 3819 return; 3820 3821 ironlake_irq_reset(dev); 3822 } 3823 3824 static void i8xx_irq_preinstall(struct drm_device * dev) 3825 { 3826 struct drm_i915_private *dev_priv = to_i915(dev); 3827 int pipe; 3828 3829 for_each_pipe(dev_priv, pipe) 3830 I915_WRITE(PIPESTAT(pipe), 0); 3831 I915_WRITE16(IMR, 0xffff); 3832 I915_WRITE16(IER, 0x0); 3833 POSTING_READ16(IER); 3834 } 3835 3836 static int i8xx_irq_postinstall(struct drm_device *dev) 3837 { 3838 struct drm_i915_private *dev_priv = to_i915(dev); 3839 3840 I915_WRITE16(EMR, 3841 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3842 3843 /* Unmask the interrupts that we always want on. */ 3844 dev_priv->irq_mask = 3845 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3846 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3847 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3848 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3849 I915_WRITE16(IMR, dev_priv->irq_mask); 3850 3851 I915_WRITE16(IER, 3852 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3853 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3854 I915_USER_INTERRUPT); 3855 POSTING_READ16(IER); 3856 3857 /* Interrupt setup is already guaranteed to be single-threaded, this is 3858 * just to make the assert_spin_locked check happy. */ 3859 spin_lock_irq(&dev_priv->irq_lock); 3860 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3861 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3862 spin_unlock_irq(&dev_priv->irq_lock); 3863 3864 return 0; 3865 } 3866 3867 /* 3868 * Returns true when a page flip has completed. 3869 */ 3870 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, 3871 int plane, int pipe, u32 iir) 3872 { 3873 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3874 3875 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3876 return false; 3877 3878 if ((iir & flip_pending) == 0) 3879 goto check_page_flip; 3880 3881 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3882 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3883 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3884 * the flip is completed (no longer pending). Since this doesn't raise 3885 * an interrupt per se, we watch for the change at vblank. 
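* If ISR still reports the flip as pending at this point, the flip has not completed yet, so fall back to the stuck-flip check below.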
3886 */ 3887 if (I915_READ16(ISR) & flip_pending) 3888 goto check_page_flip; 3889 3890 intel_finish_page_flip_cs(dev_priv, pipe); 3891 return true; 3892 3893 check_page_flip: 3894 intel_check_page_flip(dev_priv, pipe); 3895 return false; 3896 } 3897 3898 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3899 { 3900 struct drm_device *dev = arg; 3901 struct drm_i915_private *dev_priv = to_i915(dev); 3902 u16 iir, new_iir; 3903 u32 pipe_stats[2]; 3904 int pipe; 3905 u16 flip_mask = 3906 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3907 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3908 irqreturn_t ret; 3909 3910 if (!intel_irqs_enabled(dev_priv)) 3911 return IRQ_NONE; 3912 3913 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3914 disable_rpm_wakeref_asserts(dev_priv); 3915 3916 ret = IRQ_NONE; 3917 iir = I915_READ16(IIR); 3918 if (iir == 0) 3919 goto out; 3920 3921 while (iir & ~flip_mask) { 3922 /* Can't rely on pipestat interrupt bit in iir as it might 3923 * have been cleared after the pipestat interrupt was received. 3924 * It doesn't set the bit in iir again, but it still produces 3925 * interrupts (for non-MSI). 3926 */ 3927 spin_lock(&dev_priv->irq_lock); 3928 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3929 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3930 3931 for_each_pipe(dev_priv, pipe) { 3932 i915_reg_t reg = PIPESTAT(pipe); 3933 pipe_stats[pipe] = I915_READ(reg); 3934 3935 /* 3936 * Clear the PIPE*STAT regs before the IIR 3937 */ 3938 if (pipe_stats[pipe] & 0x8000ffff) 3939 I915_WRITE(reg, pipe_stats[pipe]); 3940 } 3941 spin_unlock(&dev_priv->irq_lock); 3942 3943 I915_WRITE16(IIR, iir & ~flip_mask); 3944 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3945 3946 if (iir & I915_USER_INTERRUPT) 3947 notify_ring(dev_priv->engine[RCS]); 3948 3949 for_each_pipe(dev_priv, pipe) { 3950 int plane = pipe; 3951 if (HAS_FBC(dev_priv)) 3952 plane = !plane; 3953 3954 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3955 i8xx_handle_vblank(dev_priv, plane, pipe, iir)) 3956 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3957 3958 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3959 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3960 3961 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3962 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3963 pipe); 3964 } 3965 3966 iir = new_iir; 3967 } 3968 ret = IRQ_HANDLED; 3969 3970 out: 3971 enable_rpm_wakeref_asserts(dev_priv); 3972 3973 return ret; 3974 } 3975 3976 static void i8xx_irq_uninstall(struct drm_device * dev) 3977 { 3978 struct drm_i915_private *dev_priv = to_i915(dev); 3979 int pipe; 3980 3981 for_each_pipe(dev_priv, pipe) { 3982 /* Clear enable bits; then clear status bits */ 3983 I915_WRITE(PIPESTAT(pipe), 0); 3984 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3985 } 3986 I915_WRITE16(IMR, 0xffff); 3987 I915_WRITE16(IER, 0x0); 3988 I915_WRITE16(IIR, I915_READ16(IIR)); 3989 } 3990 3991 static void i915_irq_preinstall(struct drm_device * dev) 3992 { 3993 struct drm_i915_private *dev_priv = to_i915(dev); 3994 int pipe; 3995 3996 if (I915_HAS_HOTPLUG(dev)) { 3997 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3998 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3999 } 4000 4001 I915_WRITE16(HWSTAM, 0xeffe); 4002 for_each_pipe(dev_priv, pipe) 4003 I915_WRITE(PIPESTAT(pipe), 0); 4004 I915_WRITE(IMR, 0xffffffff); 4005 I915_WRITE(IER, 0x0); 4006 POSTING_READ(IER); 4007 } 4008 4009 static int i915_irq_postinstall(struct drm_device *dev) 4010 { 4011 
struct drm_i915_private *dev_priv = to_i915(dev); 4012 u32 enable_mask; 4013 4014 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4015 4016 /* Unmask the interrupts that we always want on. */ 4017 dev_priv->irq_mask = 4018 ~(I915_ASLE_INTERRUPT | 4019 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4020 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4021 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4022 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4023 4024 enable_mask = 4025 I915_ASLE_INTERRUPT | 4026 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4027 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4028 I915_USER_INTERRUPT; 4029 4030 if (I915_HAS_HOTPLUG(dev)) { 4031 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4032 POSTING_READ(PORT_HOTPLUG_EN); 4033 4034 /* Enable in IER... */ 4035 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4036 /* and unmask in IMR */ 4037 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4038 } 4039 4040 I915_WRITE(IMR, dev_priv->irq_mask); 4041 I915_WRITE(IER, enable_mask); 4042 POSTING_READ(IER); 4043 4044 i915_enable_asle_pipestat(dev_priv); 4045 4046 /* Interrupt setup is already guaranteed to be single-threaded, this is 4047 * just to make the assert_spin_locked check happy. */ 4048 spin_lock_irq(&dev_priv->irq_lock); 4049 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4050 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4051 spin_unlock_irq(&dev_priv->irq_lock); 4052 4053 return 0; 4054 } 4055 4056 /* 4057 * Returns true when a page flip has completed. 4058 */ 4059 static bool i915_handle_vblank(struct drm_i915_private *dev_priv, 4060 int plane, int pipe, u32 iir) 4061 { 4062 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4063 4064 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 4065 return false; 4066 4067 if ((iir & flip_pending) == 0) 4068 goto check_page_flip; 4069 4070 /* We detect FlipDone by looking for the change in PendingFlip from '1' 4071 * to '0' on the following vblank, i.e. IIR has the Pendingflip 4072 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 4073 * the flip is completed (no longer pending). Since this doesn't raise 4074 * an interrupt per se, we watch for the change at vblank. 4075 */ 4076 if (I915_READ(ISR) & flip_pending) 4077 goto check_page_flip; 4078 4079 intel_finish_page_flip_cs(dev_priv, pipe); 4080 return true; 4081 4082 check_page_flip: 4083 intel_check_page_flip(dev_priv, pipe); 4084 return false; 4085 } 4086 4087 static irqreturn_t i915_irq_handler(int irq, void *arg) 4088 { 4089 struct drm_device *dev = arg; 4090 struct drm_i915_private *dev_priv = to_i915(dev); 4091 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 4092 u32 flip_mask = 4093 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4094 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4095 int pipe, ret = IRQ_NONE; 4096 4097 if (!intel_irqs_enabled(dev_priv)) 4098 return IRQ_NONE; 4099 4100 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4101 disable_rpm_wakeref_asserts(dev_priv); 4102 4103 iir = I915_READ(IIR); 4104 do { 4105 bool irq_received = (iir & ~flip_mask) != 0; 4106 bool blc_event = false; 4107 4108 /* Can't rely on pipestat interrupt bit in iir as it might 4109 * have been cleared after the pipestat interrupt was received. 4110 * It doesn't set the bit in iir again, but it still produces 4111 * interrupts (for non-MSI). 
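* So read (and clear, if necessary) the PIPESTAT registers under irq_lock on every pass, regardless of which bits iir reports.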
4112 */ 4113 spin_lock(&dev_priv->irq_lock); 4114 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4115 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4116 4117 for_each_pipe(dev_priv, pipe) { 4118 i915_reg_t reg = PIPESTAT(pipe); 4119 pipe_stats[pipe] = I915_READ(reg); 4120 4121 /* Clear the PIPE*STAT regs before the IIR */ 4122 if (pipe_stats[pipe] & 0x8000ffff) { 4123 I915_WRITE(reg, pipe_stats[pipe]); 4124 irq_received = true; 4125 } 4126 } 4127 spin_unlock(&dev_priv->irq_lock); 4128 4129 if (!irq_received) 4130 break; 4131 4132 /* Consume port. Then clear IIR or we'll miss events */ 4133 if (I915_HAS_HOTPLUG(dev_priv) && 4134 iir & I915_DISPLAY_PORT_INTERRUPT) { 4135 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4136 if (hotplug_status) 4137 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4138 } 4139 4140 I915_WRITE(IIR, iir & ~flip_mask); 4141 new_iir = I915_READ(IIR); /* Flush posted writes */ 4142 4143 if (iir & I915_USER_INTERRUPT) 4144 notify_ring(dev_priv->engine[RCS]); 4145 4146 for_each_pipe(dev_priv, pipe) { 4147 int plane = pipe; 4148 if (HAS_FBC(dev_priv)) 4149 plane = !plane; 4150 4151 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4152 i915_handle_vblank(dev_priv, plane, pipe, iir)) 4153 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4154 4155 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4156 blc_event = true; 4157 4158 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4159 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4160 4161 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4162 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4163 pipe); 4164 } 4165 4166 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4167 intel_opregion_asle_intr(dev_priv); 4168 4169 /* With MSI, interrupts are only generated when iir 4170 * transitions from zero to nonzero. If another bit got 4171 * set while we were handling the existing iir bits, then 4172 * we would never get another interrupt. 4173 * 4174 * This is fine on non-MSI as well, as if we hit this path 4175 * we avoid exiting the interrupt handler only to generate 4176 * another one. 4177 * 4178 * Note that for MSI this could cause a stray interrupt report 4179 * if an interrupt landed in the time between writing IIR and 4180 * the posting read. This should be rare enough to never 4181 * trigger the 99% of 100,000 interrupts test for disabling 4182 * stray interrupts. 
4183 */ 4184 ret = IRQ_HANDLED; 4185 iir = new_iir; 4186 } while (iir & ~flip_mask); 4187 4188 enable_rpm_wakeref_asserts(dev_priv); 4189 4190 return ret; 4191 } 4192 4193 static void i915_irq_uninstall(struct drm_device * dev) 4194 { 4195 struct drm_i915_private *dev_priv = to_i915(dev); 4196 int pipe; 4197 4198 if (I915_HAS_HOTPLUG(dev)) { 4199 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4200 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4201 } 4202 4203 I915_WRITE16(HWSTAM, 0xffff); 4204 for_each_pipe(dev_priv, pipe) { 4205 /* Clear enable bits; then clear status bits */ 4206 I915_WRITE(PIPESTAT(pipe), 0); 4207 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4208 } 4209 I915_WRITE(IMR, 0xffffffff); 4210 I915_WRITE(IER, 0x0); 4211 4212 I915_WRITE(IIR, I915_READ(IIR)); 4213 } 4214 4215 static void i965_irq_preinstall(struct drm_device * dev) 4216 { 4217 struct drm_i915_private *dev_priv = to_i915(dev); 4218 int pipe; 4219 4220 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4221 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4222 4223 I915_WRITE(HWSTAM, 0xeffe); 4224 for_each_pipe(dev_priv, pipe) 4225 I915_WRITE(PIPESTAT(pipe), 0); 4226 I915_WRITE(IMR, 0xffffffff); 4227 I915_WRITE(IER, 0x0); 4228 POSTING_READ(IER); 4229 } 4230 4231 static int i965_irq_postinstall(struct drm_device *dev) 4232 { 4233 struct drm_i915_private *dev_priv = to_i915(dev); 4234 u32 enable_mask; 4235 u32 error_mask; 4236 4237 /* Unmask the interrupts that we always want on. */ 4238 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4239 I915_DISPLAY_PORT_INTERRUPT | 4240 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4241 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4242 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4243 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4244 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4245 4246 enable_mask = ~dev_priv->irq_mask; 4247 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4248 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4249 enable_mask |= I915_USER_INTERRUPT; 4250 4251 if (IS_G4X(dev_priv)) 4252 enable_mask |= I915_BSD_USER_INTERRUPT; 4253 4254 /* Interrupt setup is already guaranteed to be single-threaded, this is 4255 * just to make the assert_spin_locked check happy. */ 4256 spin_lock_irq(&dev_priv->irq_lock); 4257 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4258 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4259 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4260 spin_unlock_irq(&dev_priv->irq_lock); 4261 4262 /* 4263 * Enable some error detection, note the instruction error mask 4264 * bit is reserved, so we leave it masked. 
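* G4X exposes a few extra error sources (GM45_ERROR_MEM_PRIV, GM45_ERROR_CP_PRIV), so it unmasks more bits in EMR than the older platforms.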
4265 */ 4266 if (IS_G4X(dev_priv)) { 4267 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4268 GM45_ERROR_MEM_PRIV | 4269 GM45_ERROR_CP_PRIV | 4270 I915_ERROR_MEMORY_REFRESH); 4271 } else { 4272 error_mask = ~(I915_ERROR_PAGE_TABLE | 4273 I915_ERROR_MEMORY_REFRESH); 4274 } 4275 I915_WRITE(EMR, error_mask); 4276 4277 I915_WRITE(IMR, dev_priv->irq_mask); 4278 I915_WRITE(IER, enable_mask); 4279 POSTING_READ(IER); 4280 4281 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4282 POSTING_READ(PORT_HOTPLUG_EN); 4283 4284 i915_enable_asle_pipestat(dev_priv); 4285 4286 return 0; 4287 } 4288 4289 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4290 { 4291 u32 hotplug_en; 4292 4293 assert_spin_locked(&dev_priv->irq_lock); 4294 4295 /* Note HDMI and DP share hotplug bits */ 4296 /* enable bits are the same for all generations */ 4297 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4298 /* Programming the CRT detection parameters tends 4299 to generate a spurious hotplug event about three 4300 seconds later. So just do it once. 4301 */ 4302 if (IS_G4X(dev_priv)) 4303 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4304 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4305 4306 /* Ignore TV since it's buggy */ 4307 i915_hotplug_interrupt_update_locked(dev_priv, 4308 HOTPLUG_INT_EN_MASK | 4309 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4310 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4311 hotplug_en); 4312 } 4313 4314 static irqreturn_t i965_irq_handler(int irq, void *arg) 4315 { 4316 struct drm_device *dev = arg; 4317 struct drm_i915_private *dev_priv = to_i915(dev); 4318 u32 iir, new_iir; 4319 u32 pipe_stats[I915_MAX_PIPES]; 4320 int ret = IRQ_NONE, pipe; 4321 u32 flip_mask = 4322 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4323 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4324 4325 if (!intel_irqs_enabled(dev_priv)) 4326 return IRQ_NONE; 4327 4328 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4329 disable_rpm_wakeref_asserts(dev_priv); 4330 4331 iir = I915_READ(IIR); 4332 4333 for (;;) { 4334 bool irq_received = (iir & ~flip_mask) != 0; 4335 bool blc_event = false; 4336 4337 /* Can't rely on pipestat interrupt bit in iir as it might 4338 * have been cleared after the pipestat interrupt was received. 4339 * It doesn't set the bit in iir again, but it still produces 4340 * interrupts (for non-MSI). 4341 */ 4342 spin_lock(&dev_priv->irq_lock); 4343 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4344 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4345 4346 for_each_pipe(dev_priv, pipe) { 4347 i915_reg_t reg = PIPESTAT(pipe); 4348 pipe_stats[pipe] = I915_READ(reg); 4349 4350 /* 4351 * Clear the PIPE*STAT regs before the IIR 4352 */ 4353 if (pipe_stats[pipe] & 0x8000ffff) { 4354 I915_WRITE(reg, pipe_stats[pipe]); 4355 irq_received = true; 4356 } 4357 } 4358 spin_unlock(&dev_priv->irq_lock); 4359 4360 if (!irq_received) 4361 break; 4362 4363 ret = IRQ_HANDLED; 4364 4365 /* Consume port. 
Then clear IIR or we'll miss events */ 4366 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4367 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4368 if (hotplug_status) 4369 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4370 } 4371 4372 I915_WRITE(IIR, iir & ~flip_mask); 4373 new_iir = I915_READ(IIR); /* Flush posted writes */ 4374 4375 if (iir & I915_USER_INTERRUPT) 4376 notify_ring(dev_priv->engine[RCS]); 4377 if (iir & I915_BSD_USER_INTERRUPT) 4378 notify_ring(dev_priv->engine[VCS]); 4379 4380 for_each_pipe(dev_priv, pipe) { 4381 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4382 i915_handle_vblank(dev_priv, pipe, pipe, iir)) 4383 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4384 4385 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4386 blc_event = true; 4387 4388 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4389 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4390 4391 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4392 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4393 } 4394 4395 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4396 intel_opregion_asle_intr(dev_priv); 4397 4398 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4399 gmbus_irq_handler(dev_priv); 4400 4401 /* With MSI, interrupts are only generated when iir 4402 * transitions from zero to nonzero. If another bit got 4403 * set while we were handling the existing iir bits, then 4404 * we would never get another interrupt. 4405 * 4406 * This is fine on non-MSI as well, as if we hit this path 4407 * we avoid exiting the interrupt handler only to generate 4408 * another one. 4409 * 4410 * Note that for MSI this could cause a stray interrupt report 4411 * if an interrupt landed in the time between writing IIR and 4412 * the posting read. This should be rare enough to never 4413 * trigger the 99% of 100,000 interrupts test for disabling 4414 * stray interrupts. 4415 */ 4416 iir = new_iir; 4417 } 4418 4419 enable_rpm_wakeref_asserts(dev_priv); 4420 4421 return ret; 4422 } 4423 4424 static void i965_irq_uninstall(struct drm_device * dev) 4425 { 4426 struct drm_i915_private *dev_priv = to_i915(dev); 4427 int pipe; 4428 4429 if (!dev_priv) 4430 return; 4431 4432 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4433 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4434 4435 I915_WRITE(HWSTAM, 0xffffffff); 4436 for_each_pipe(dev_priv, pipe) 4437 I915_WRITE(PIPESTAT(pipe), 0); 4438 I915_WRITE(IMR, 0xffffffff); 4439 I915_WRITE(IER, 0x0); 4440 4441 for_each_pipe(dev_priv, pipe) 4442 I915_WRITE(PIPESTAT(pipe), 4443 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4444 I915_WRITE(IIR, I915_READ(IIR)); 4445 } 4446 4447 /** 4448 * intel_irq_init - initializes irq support 4449 * @dev_priv: i915 device instance 4450 * 4451 * This function initializes all the irq support including work items, timers 4452 * and all the vtables. It does not setup the interrupt itself though. 
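* Enabling the interrupt itself is done separately, via intel_irq_install().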
4453 */ 4454 void intel_irq_init(struct drm_i915_private *dev_priv) 4455 { 4456 struct drm_device *dev = &dev_priv->drm; 4457 4458 intel_hpd_init_work(dev_priv); 4459 4460 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4461 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4462 4463 /* Let's track the enabled rps events */ 4464 if (IS_VALLEYVIEW(dev_priv)) 4465 /* WaGsvRC0ResidencyMethod:vlv */ 4466 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; 4467 else 4468 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4469 4470 dev_priv->rps.pm_intr_keep = 0; 4471 4472 /* 4473 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer 4474 * if GEN6_PM_UP_EI_EXPIRED is masked. 4475 * 4476 * TODO: verify if this can be reproduced on VLV,CHV. 4477 */ 4478 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) 4479 dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED; 4480 4481 if (INTEL_INFO(dev_priv)->gen >= 8) 4482 dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC; 4483 4484 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, 4485 i915_hangcheck_elapsed); 4486 4487 if (IS_GEN2(dev_priv)) { 4488 /* Gen2 doesn't have a hardware frame counter */ 4489 dev->max_vblank_count = 0; 4490 dev->driver->get_vblank_counter = drm_vblank_no_hw_counter; 4491 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 4492 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4493 dev->driver->get_vblank_counter = g4x_get_vblank_counter; 4494 } else { 4495 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4496 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4497 } 4498 4499 /* 4500 * Opt out of the vblank disable timer on everything except gen2. 4501 * Gen2 doesn't have a hardware frame counter and so depends on 4502 * vblank interrupts to produce sane vblank seuquence numbers. 
4503 */ 4504 if (!IS_GEN2(dev_priv)) 4505 dev->vblank_disable_immediate = true; 4506 4507 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4508 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4509 4510 if (IS_CHERRYVIEW(dev_priv)) { 4511 dev->driver->irq_handler = cherryview_irq_handler; 4512 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4513 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4514 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4515 dev->driver->enable_vblank = i965_enable_vblank; 4516 dev->driver->disable_vblank = i965_disable_vblank; 4517 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4518 } else if (IS_VALLEYVIEW(dev_priv)) { 4519 dev->driver->irq_handler = valleyview_irq_handler; 4520 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4521 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4522 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4523 dev->driver->enable_vblank = i965_enable_vblank; 4524 dev->driver->disable_vblank = i965_disable_vblank; 4525 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4526 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4527 dev->driver->irq_handler = gen8_irq_handler; 4528 dev->driver->irq_preinstall = gen8_irq_reset; 4529 dev->driver->irq_postinstall = gen8_irq_postinstall; 4530 dev->driver->irq_uninstall = gen8_irq_uninstall; 4531 dev->driver->enable_vblank = gen8_enable_vblank; 4532 dev->driver->disable_vblank = gen8_disable_vblank; 4533 if (IS_BROXTON(dev_priv)) 4534 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4535 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 4536 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4537 else 4538 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4539 } else if (HAS_PCH_SPLIT(dev_priv)) { 4540 dev->driver->irq_handler = ironlake_irq_handler; 4541 dev->driver->irq_preinstall = ironlake_irq_reset; 4542 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4543 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4544 dev->driver->enable_vblank = ironlake_enable_vblank; 4545 dev->driver->disable_vblank = ironlake_disable_vblank; 4546 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4547 } else { 4548 if (IS_GEN2(dev_priv)) { 4549 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4550 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4551 dev->driver->irq_handler = i8xx_irq_handler; 4552 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4553 dev->driver->enable_vblank = i8xx_enable_vblank; 4554 dev->driver->disable_vblank = i8xx_disable_vblank; 4555 } else if (IS_GEN3(dev_priv)) { 4556 dev->driver->irq_preinstall = i915_irq_preinstall; 4557 dev->driver->irq_postinstall = i915_irq_postinstall; 4558 dev->driver->irq_uninstall = i915_irq_uninstall; 4559 dev->driver->irq_handler = i915_irq_handler; 4560 dev->driver->enable_vblank = i8xx_enable_vblank; 4561 dev->driver->disable_vblank = i8xx_disable_vblank; 4562 } else { 4563 dev->driver->irq_preinstall = i965_irq_preinstall; 4564 dev->driver->irq_postinstall = i965_irq_postinstall; 4565 dev->driver->irq_uninstall = i965_irq_uninstall; 4566 dev->driver->irq_handler = i965_irq_handler; 4567 dev->driver->enable_vblank = i965_enable_vblank; 4568 dev->driver->disable_vblank = i965_disable_vblank; 4569 } 4570 if (I915_HAS_HOTPLUG(dev_priv)) 4571 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4572 } 4573 } 4574 4575 /** 4576 * intel_irq_install - enables the hardware interrupt 4577 * @dev_priv: i915 device instance 4578 * 4579 * This 
function enables the hardware interrupt handling, but leaves the hotplug 4580 * handling still disabled. It is called after intel_irq_init(). 4581 * 4582 * In the driver load and resume code we need working interrupts in a few places 4583 * but don't want to deal with the hassle of concurrent probe and hotplug 4584 * workers. Hence the split into this two-stage approach. 4585 */ 4586 int intel_irq_install(struct drm_i915_private *dev_priv) 4587 { 4588 /* 4589 * We enable some interrupt sources in our postinstall hooks, so mark 4590 * interrupts as enabled _before_ actually enabling them to avoid 4591 * special cases in our ordering checks. 4592 */ 4593 dev_priv->pm.irqs_enabled = true; 4594 4595 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); 4596 } 4597 4598 /** 4599 * intel_irq_uninstall - finalizes all irq handling 4600 * @dev_priv: i915 device instance 4601 * 4602 * This stops interrupt and hotplug handling and unregisters and frees all 4603 * resources acquired in the init functions. 4604 */ 4605 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4606 { 4607 drm_irq_uninstall(&dev_priv->drm); 4608 intel_hpd_cancel_work(dev_priv); 4609 dev_priv->pm.irqs_enabled = false; 4610 } 4611 4612 /** 4613 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4614 * @dev_priv: i915 device instance 4615 * 4616 * This function is used to disable interrupts at runtime, both in the runtime 4617 * pm and the system suspend/resume code. 4618 */ 4619 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4620 { 4621 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); 4622 dev_priv->pm.irqs_enabled = false; 4623 synchronize_irq(dev_priv->drm.irq); 4624 } 4625 4626 /** 4627 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4628 * @dev_priv: i915 device instance 4629 * 4630 * This function is used to enable interrupts at runtime, both in the runtime 4631 * pm and the system suspend/resume code. 4632 */ 4633 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4634 { 4635 dev_priv->pm.irqs_enabled = true; 4636 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); 4637 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); 4638 } 4639