/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
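
/*
 * The hpd_* tables above are indexed by enum hpd_pin and map each pin to the
 * matching hotplug bit in the relevant hardware register (SDE* bits on the
 * PCH, *_HOTPLUG_INT_* bits on GMCH/g4x style platforms).
 */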

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
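
/*
 * On gen8+ the PM interrupts live in GT interrupt unit 2, while gen6/7 have
 * dedicated GEN6_PM* registers; these helpers return the right IIR/IMR/IER
 * offset so the RPS code below can stay generation agnostic.
 */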

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can hang, while VLV and CHV may hard hang, on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

	dev_priv->rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
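
/*
 * PIPESTAT keeps the interrupt enable bits in the high 16 bits and the
 * corresponding status bits in the low 16 bits, hence the status_mask << 16
 * conversion used below; a few VLV-specific bits don't follow that scheme,
 * which is what vlv_get_pipestat_enable_mask() compensates for.
 */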

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}

/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));


	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}


	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
			(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
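
/*
 * Called from the GT interrupt handler: mask further parity interrupts, note
 * which L3 slice(s) signalled the error and kick the work item above, which
 * reads out and clears the error registers and notifies userspace.
 */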

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
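
/*
 * The hsw/ivb/i9xx pipe CRC handlers above differ only in which result
 * registers the hardware provides; they all feed display_pipe_crc_irq_handler(),
 * which tags each sample with the vblank counter and queues it for the
 * debugfs readers.
 */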

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	/* TODO: RPS on GEN9+ is not supported yet. */
	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
		      "GEN9+: unexpected RPS IRQ\n"))
		return;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
1861 */ 1862 POSTING_READ(PORT_HOTPLUG_STAT); 1863 1864 if (IS_G4X(dev)) { 1865 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1866 1867 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); 1868 } else { 1869 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1870 1871 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); 1872 } 1873 1874 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && 1875 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1876 dp_aux_irq_handler(dev); 1877 } 1878 } 1879 1880 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1881 { 1882 struct drm_device *dev = arg; 1883 struct drm_i915_private *dev_priv = dev->dev_private; 1884 u32 iir, gt_iir, pm_iir; 1885 irqreturn_t ret = IRQ_NONE; 1886 1887 while (true) { 1888 /* Find, clear, then process each source of interrupt */ 1889 1890 gt_iir = I915_READ(GTIIR); 1891 if (gt_iir) 1892 I915_WRITE(GTIIR, gt_iir); 1893 1894 pm_iir = I915_READ(GEN6_PMIIR); 1895 if (pm_iir) 1896 I915_WRITE(GEN6_PMIIR, pm_iir); 1897 1898 iir = I915_READ(VLV_IIR); 1899 if (iir) { 1900 /* Consume port before clearing IIR or we'll miss events */ 1901 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1902 i9xx_hpd_irq_handler(dev); 1903 I915_WRITE(VLV_IIR, iir); 1904 } 1905 1906 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1907 goto out; 1908 1909 ret = IRQ_HANDLED; 1910 1911 if (gt_iir) 1912 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1913 if (pm_iir) 1914 gen6_rps_irq_handler(dev_priv, pm_iir); 1915 /* Call regardless, as some status bits might not be 1916 * signalled in iir */ 1917 valleyview_pipestat_irq_handler(dev, iir); 1918 } 1919 1920 out: 1921 return ret; 1922 } 1923 1924 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1925 { 1926 struct drm_device *dev = arg; 1927 struct drm_i915_private *dev_priv = dev->dev_private; 1928 u32 master_ctl, iir; 1929 irqreturn_t ret = IRQ_NONE; 1930 1931 for (;;) { 1932 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1933 iir = I915_READ(VLV_IIR); 1934 1935 if (master_ctl == 0 && iir == 0) 1936 break; 1937 1938 ret = IRQ_HANDLED; 1939 1940 I915_WRITE(GEN8_MASTER_IRQ, 0); 1941 1942 /* Find, clear, then process each source of interrupt */ 1943 1944 if (iir) { 1945 /* Consume port before clearing IIR or we'll miss events */ 1946 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1947 i9xx_hpd_irq_handler(dev); 1948 I915_WRITE(VLV_IIR, iir); 1949 } 1950 1951 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 1952 1953 /* Call regardless, as some status bits might not be 1954 * signalled in iir */ 1955 valleyview_pipestat_irq_handler(dev, iir); 1956 1957 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 1958 POSTING_READ(GEN8_MASTER_IRQ); 1959 } 1960 1961 return ret; 1962 } 1963 1964 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1965 { 1966 struct drm_i915_private *dev_priv = dev->dev_private; 1967 int pipe; 1968 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1969 u32 dig_hotplug_reg; 1970 1971 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1972 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1973 1974 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); 1975 1976 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1977 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1978 SDE_AUDIO_POWER_SHIFT); 1979 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1980 port_name(port)); 1981 } 1982 1983 if (pch_iir & SDE_AUX_MASK) 1984 dp_aux_irq_handler(dev); 1985 1986 if (pch_iir & SDE_GMBUS) 1987 gmbus_irq_handler(dev); 1988 1989 
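	/*
	 * The remaining SDE bits are mostly informational on IBX: the audio,
	 * FDI and transcoder CRC events are only logged below, poison is
	 * reported as an error, and the transcoder FIFO underruns are
	 * forwarded to the shared underrun handling.
	 */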
if (pch_iir & SDE_AUDIO_HDCP_MASK) 1990 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1991 1992 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1993 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1994 1995 if (pch_iir & SDE_POISON) 1996 DRM_ERROR("PCH poison interrupt\n"); 1997 1998 if (pch_iir & SDE_FDI_MASK) 1999 for_each_pipe(dev_priv, pipe) 2000 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2001 pipe_name(pipe), 2002 I915_READ(FDI_RX_IIR(pipe))); 2003 2004 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2005 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2006 2007 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2008 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2009 2010 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2011 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2012 2013 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2014 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2015 } 2016 2017 static void ivb_err_int_handler(struct drm_device *dev) 2018 { 2019 struct drm_i915_private *dev_priv = dev->dev_private; 2020 u32 err_int = I915_READ(GEN7_ERR_INT); 2021 enum pipe pipe; 2022 2023 if (err_int & ERR_INT_POISON) 2024 DRM_ERROR("Poison interrupt\n"); 2025 2026 for_each_pipe(dev_priv, pipe) { 2027 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2028 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2029 2030 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2031 if (IS_IVYBRIDGE(dev)) 2032 ivb_pipe_crc_irq_handler(dev, pipe); 2033 else 2034 hsw_pipe_crc_irq_handler(dev, pipe); 2035 } 2036 } 2037 2038 I915_WRITE(GEN7_ERR_INT, err_int); 2039 } 2040 2041 static void cpt_serr_int_handler(struct drm_device *dev) 2042 { 2043 struct drm_i915_private *dev_priv = dev->dev_private; 2044 u32 serr_int = I915_READ(SERR_INT); 2045 2046 if (serr_int & SERR_INT_POISON) 2047 DRM_ERROR("PCH poison interrupt\n"); 2048 2049 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2050 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2051 2052 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2053 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2054 2055 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2056 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2057 2058 I915_WRITE(SERR_INT, serr_int); 2059 } 2060 2061 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2062 { 2063 struct drm_i915_private *dev_priv = dev->dev_private; 2064 int pipe; 2065 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2066 u32 dig_hotplug_reg; 2067 2068 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2069 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2070 2071 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); 2072 2073 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2074 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2075 SDE_AUDIO_POWER_SHIFT_CPT); 2076 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2077 port_name(port)); 2078 } 2079 2080 if (pch_iir & SDE_AUX_MASK_CPT) 2081 dp_aux_irq_handler(dev); 2082 2083 if (pch_iir & SDE_GMBUS_CPT) 2084 gmbus_irq_handler(dev); 2085 2086 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2087 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2088 2089 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2090 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2091 2092 if (pch_iir & SDE_FDI_MASK_CPT) 2093 for_each_pipe(dev_priv, pipe) 2094 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2095 pipe_name(pipe), 2096 I915_READ(FDI_RX_IIR(pipe))); 2097 2098 if (pch_iir & SDE_ERROR_CPT) 2099 cpt_serr_int_handler(dev); 
2100 } 2101 2102 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2103 { 2104 struct drm_i915_private *dev_priv = dev->dev_private; 2105 enum pipe pipe; 2106 2107 if (de_iir & DE_AUX_CHANNEL_A) 2108 dp_aux_irq_handler(dev); 2109 2110 if (de_iir & DE_GSE) 2111 intel_opregion_asle_intr(dev); 2112 2113 if (de_iir & DE_POISON) 2114 DRM_ERROR("Poison interrupt\n"); 2115 2116 for_each_pipe(dev_priv, pipe) { 2117 if (de_iir & DE_PIPE_VBLANK(pipe) && 2118 intel_pipe_handle_vblank(dev, pipe)) 2119 intel_check_page_flip(dev, pipe); 2120 2121 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2122 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2123 2124 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2125 i9xx_pipe_crc_irq_handler(dev, pipe); 2126 2127 /* plane/pipes map 1:1 on ilk+ */ 2128 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2129 intel_prepare_page_flip(dev, pipe); 2130 intel_finish_page_flip_plane(dev, pipe); 2131 } 2132 } 2133 2134 /* check event from PCH */ 2135 if (de_iir & DE_PCH_EVENT) { 2136 u32 pch_iir = I915_READ(SDEIIR); 2137 2138 if (HAS_PCH_CPT(dev)) 2139 cpt_irq_handler(dev, pch_iir); 2140 else 2141 ibx_irq_handler(dev, pch_iir); 2142 2143 /* should clear PCH hotplug event before clear CPU irq */ 2144 I915_WRITE(SDEIIR, pch_iir); 2145 } 2146 2147 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2148 ironlake_rps_change_irq_handler(dev); 2149 } 2150 2151 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2152 { 2153 struct drm_i915_private *dev_priv = dev->dev_private; 2154 enum pipe pipe; 2155 2156 if (de_iir & DE_ERR_INT_IVB) 2157 ivb_err_int_handler(dev); 2158 2159 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2160 dp_aux_irq_handler(dev); 2161 2162 if (de_iir & DE_GSE_IVB) 2163 intel_opregion_asle_intr(dev); 2164 2165 for_each_pipe(dev_priv, pipe) { 2166 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2167 intel_pipe_handle_vblank(dev, pipe)) 2168 intel_check_page_flip(dev, pipe); 2169 2170 /* plane/pipes map 1:1 on ilk+ */ 2171 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2172 intel_prepare_page_flip(dev, pipe); 2173 intel_finish_page_flip_plane(dev, pipe); 2174 } 2175 } 2176 2177 /* check event from PCH */ 2178 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2179 u32 pch_iir = I915_READ(SDEIIR); 2180 2181 cpt_irq_handler(dev, pch_iir); 2182 2183 /* clear PCH hotplug event before clear CPU irq */ 2184 I915_WRITE(SDEIIR, pch_iir); 2185 } 2186 } 2187 2188 /* 2189 * To handle irqs with the minimum potential races with fresh interrupts, we: 2190 * 1 - Disable Master Interrupt Control. 2191 * 2 - Find the source(s) of the interrupt. 2192 * 3 - Clear the Interrupt Identity bits (IIR). 2193 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2194 * 5 - Re-enable Master Interrupt Control. 2195 */ 2196 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2197 { 2198 struct drm_device *dev = arg; 2199 struct drm_i915_private *dev_priv = dev->dev_private; 2200 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2201 irqreturn_t ret = IRQ_NONE; 2202 2203 /* We get interrupts on unclaimed registers, so check for this before we 2204 * do any I915_{READ,WRITE}. */ 2205 intel_uncore_check_errors(dev); 2206 2207 /* disable master interrupt before clearing iir */ 2208 de_ier = I915_READ(DEIER); 2209 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2210 POSTING_READ(DEIER); 2211 2212 /* Disable south interrupts. 
We'll only write to SDEIIR once, so further 2213 * interrupts will will be stored on its back queue, and then we'll be 2214 * able to process them after we restore SDEIER (as soon as we restore 2215 * it, we'll get an interrupt if SDEIIR still has something to process 2216 * due to its back queue). */ 2217 if (!HAS_PCH_NOP(dev)) { 2218 sde_ier = I915_READ(SDEIER); 2219 I915_WRITE(SDEIER, 0); 2220 POSTING_READ(SDEIER); 2221 } 2222 2223 /* Find, clear, then process each source of interrupt */ 2224 2225 gt_iir = I915_READ(GTIIR); 2226 if (gt_iir) { 2227 I915_WRITE(GTIIR, gt_iir); 2228 ret = IRQ_HANDLED; 2229 if (INTEL_INFO(dev)->gen >= 6) 2230 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2231 else 2232 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2233 } 2234 2235 de_iir = I915_READ(DEIIR); 2236 if (de_iir) { 2237 I915_WRITE(DEIIR, de_iir); 2238 ret = IRQ_HANDLED; 2239 if (INTEL_INFO(dev)->gen >= 7) 2240 ivb_display_irq_handler(dev, de_iir); 2241 else 2242 ilk_display_irq_handler(dev, de_iir); 2243 } 2244 2245 if (INTEL_INFO(dev)->gen >= 6) { 2246 u32 pm_iir = I915_READ(GEN6_PMIIR); 2247 if (pm_iir) { 2248 I915_WRITE(GEN6_PMIIR, pm_iir); 2249 ret = IRQ_HANDLED; 2250 gen6_rps_irq_handler(dev_priv, pm_iir); 2251 } 2252 } 2253 2254 I915_WRITE(DEIER, de_ier); 2255 POSTING_READ(DEIER); 2256 if (!HAS_PCH_NOP(dev)) { 2257 I915_WRITE(SDEIER, sde_ier); 2258 POSTING_READ(SDEIER); 2259 } 2260 2261 return ret; 2262 } 2263 2264 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2265 { 2266 struct drm_device *dev = arg; 2267 struct drm_i915_private *dev_priv = dev->dev_private; 2268 u32 master_ctl; 2269 irqreturn_t ret = IRQ_NONE; 2270 uint32_t tmp = 0; 2271 enum pipe pipe; 2272 u32 aux_mask = GEN8_AUX_CHANNEL_A; 2273 2274 if (IS_GEN9(dev)) 2275 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2276 GEN9_AUX_CHANNEL_D; 2277 2278 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2279 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2280 if (!master_ctl) 2281 return IRQ_NONE; 2282 2283 I915_WRITE(GEN8_MASTER_IRQ, 0); 2284 POSTING_READ(GEN8_MASTER_IRQ); 2285 2286 /* Find, clear, then process each source of interrupt */ 2287 2288 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2289 2290 if (master_ctl & GEN8_DE_MISC_IRQ) { 2291 tmp = I915_READ(GEN8_DE_MISC_IIR); 2292 if (tmp) { 2293 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2294 ret = IRQ_HANDLED; 2295 if (tmp & GEN8_DE_MISC_GSE) 2296 intel_opregion_asle_intr(dev); 2297 else 2298 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2299 } 2300 else 2301 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2302 } 2303 2304 if (master_ctl & GEN8_DE_PORT_IRQ) { 2305 tmp = I915_READ(GEN8_DE_PORT_IIR); 2306 if (tmp) { 2307 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2308 ret = IRQ_HANDLED; 2309 2310 if (tmp & aux_mask) 2311 dp_aux_irq_handler(dev); 2312 else 2313 DRM_ERROR("Unexpected DE Port interrupt\n"); 2314 } 2315 else 2316 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2317 } 2318 2319 for_each_pipe(dev_priv, pipe) { 2320 uint32_t pipe_iir, flip_done = 0, fault_errors = 0; 2321 2322 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2323 continue; 2324 2325 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2326 if (pipe_iir) { 2327 ret = IRQ_HANDLED; 2328 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2329 2330 if (pipe_iir & GEN8_PIPE_VBLANK && 2331 intel_pipe_handle_vblank(dev, pipe)) 2332 intel_check_page_flip(dev, pipe); 2333 2334 if (IS_GEN9(dev)) 2335 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; 2336 else 2337 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; 2338 
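			/*
			 * GEN9 reports flip completion per plane (plane 1
			 * here), whereas GEN8 has a single primary-plane bit;
			 * the completion handling below is the same for both.
			 */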
2339 if (flip_done) { 2340 intel_prepare_page_flip(dev, pipe); 2341 intel_finish_page_flip_plane(dev, pipe); 2342 } 2343 2344 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2345 hsw_pipe_crc_irq_handler(dev, pipe); 2346 2347 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) 2348 intel_cpu_fifo_underrun_irq_handler(dev_priv, 2349 pipe); 2350 2351 2352 if (IS_GEN9(dev)) 2353 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2354 else 2355 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2356 2357 if (fault_errors) 2358 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2359 pipe_name(pipe), 2360 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2361 } else 2362 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2363 } 2364 2365 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 2366 /* 2367 * FIXME(BDW): Assume for now that the new interrupt handling 2368 * scheme also closed the SDE interrupt handling race we've seen 2369 * on older pch-split platforms. But this needs testing. 2370 */ 2371 u32 pch_iir = I915_READ(SDEIIR); 2372 if (pch_iir) { 2373 I915_WRITE(SDEIIR, pch_iir); 2374 ret = IRQ_HANDLED; 2375 cpt_irq_handler(dev, pch_iir); 2376 } else 2377 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2378 2379 } 2380 2381 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2382 POSTING_READ(GEN8_MASTER_IRQ); 2383 2384 return ret; 2385 } 2386 2387 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2388 bool reset_completed) 2389 { 2390 struct intel_engine_cs *ring; 2391 int i; 2392 2393 /* 2394 * Notify all waiters for GPU completion events that reset state has 2395 * been changed, and that they need to restart their wait after 2396 * checking for potential errors (and bail out to drop locks if there is 2397 * a gpu reset pending so that i915_error_work_func can acquire them). 2398 */ 2399 2400 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2401 for_each_ring(ring, dev_priv, i) 2402 wake_up_all(&ring->irq_queue); 2403 2404 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2405 wake_up_all(&dev_priv->pending_flip_queue); 2406 2407 /* 2408 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2409 * reset state is cleared. 2410 */ 2411 if (reset_completed) 2412 wake_up_all(&dev_priv->gpu_error.reset_queue); 2413 } 2414 2415 /** 2416 * i915_error_work_func - do process context error handling work 2417 * @work: work struct 2418 * 2419 * Fire an error uevent so userspace can see that a hang or error 2420 * was detected. 2421 */ 2422 static void i915_error_work_func(struct work_struct *work) 2423 { 2424 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 2425 work); 2426 struct drm_i915_private *dev_priv = 2427 container_of(error, struct drm_i915_private, gpu_error); 2428 struct drm_device *dev = dev_priv->dev; 2429 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2430 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2431 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2432 int ret; 2433 2434 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2435 2436 /* 2437 * Note that there's only one work item which does gpu resets, so we 2438 * need not worry about concurrent gpu resets potentially incrementing 2439 * error->reset_counter twice. We only need to take care of another 2440 * racing irq/hangcheck declaring the gpu dead for a second time. 
	 * A quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
2497 */ 2498 i915_error_wake_up(dev_priv, true); 2499 } 2500 } 2501 2502 static void i915_report_and_clear_eir(struct drm_device *dev) 2503 { 2504 struct drm_i915_private *dev_priv = dev->dev_private; 2505 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2506 u32 eir = I915_READ(EIR); 2507 int pipe, i; 2508 2509 if (!eir) 2510 return; 2511 2512 pr_err("render error detected, EIR: 0x%08x\n", eir); 2513 2514 i915_get_extra_instdone(dev, instdone); 2515 2516 if (IS_G4X(dev)) { 2517 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2518 u32 ipeir = I915_READ(IPEIR_I965); 2519 2520 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2521 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2522 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2523 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2524 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2525 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2526 I915_WRITE(IPEIR_I965, ipeir); 2527 POSTING_READ(IPEIR_I965); 2528 } 2529 if (eir & GM45_ERROR_PAGE_TABLE) { 2530 u32 pgtbl_err = I915_READ(PGTBL_ER); 2531 pr_err("page table error\n"); 2532 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2533 I915_WRITE(PGTBL_ER, pgtbl_err); 2534 POSTING_READ(PGTBL_ER); 2535 } 2536 } 2537 2538 if (!IS_GEN2(dev)) { 2539 if (eir & I915_ERROR_PAGE_TABLE) { 2540 u32 pgtbl_err = I915_READ(PGTBL_ER); 2541 pr_err("page table error\n"); 2542 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2543 I915_WRITE(PGTBL_ER, pgtbl_err); 2544 POSTING_READ(PGTBL_ER); 2545 } 2546 } 2547 2548 if (eir & I915_ERROR_MEMORY_REFRESH) { 2549 pr_err("memory refresh error:\n"); 2550 for_each_pipe(dev_priv, pipe) 2551 pr_err("pipe %c stat: 0x%08x\n", 2552 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2553 /* pipestat has already been acked */ 2554 } 2555 if (eir & I915_ERROR_INSTRUCTION) { 2556 pr_err("instruction error\n"); 2557 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2558 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2559 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2560 if (INTEL_INFO(dev)->gen < 4) { 2561 u32 ipeir = I915_READ(IPEIR); 2562 2563 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2564 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2565 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2566 I915_WRITE(IPEIR, ipeir); 2567 POSTING_READ(IPEIR); 2568 } else { 2569 u32 ipeir = I915_READ(IPEIR_I965); 2570 2571 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2572 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2573 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2574 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2575 I915_WRITE(IPEIR_I965, ipeir); 2576 POSTING_READ(IPEIR_I965); 2577 } 2578 } 2579 2580 I915_WRITE(EIR, eir); 2581 POSTING_READ(EIR); 2582 eir = I915_READ(EIR); 2583 if (eir) { 2584 /* 2585 * some errors might have become stuck, 2586 * mask them. 2587 */ 2588 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2589 I915_WRITE(EMR, I915_READ(EMR) | eir); 2590 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2591 } 2592 } 2593 2594 /** 2595 * i915_handle_error - handle an error interrupt 2596 * @dev: drm device 2597 * 2598 * Do some basic checking of regsiter state at error interrupt time and 2599 * dump it to the syslog. Also call i915_capture_error_state() to make 2600 * sure we get a record and make it available in debugfs. Fire a uevent 2601 * so userspace knows something bad happened (should trigger collection 2602 * of a ring dump etc.). 2603 */ 2604 void i915_handle_error(struct drm_device *dev, bool wedged, 2605 const char *fmt, ...) 
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ?
DE_PIPE_VBLANK_IVB(pipe) : 2675 DE_PIPE_VBLANK(pipe); 2676 2677 if (!i915_pipe_enabled(dev, pipe)) 2678 return -EINVAL; 2679 2680 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2681 ironlake_enable_display_irq(dev_priv, bit); 2682 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2683 2684 return 0; 2685 } 2686 2687 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2688 { 2689 struct drm_i915_private *dev_priv = dev->dev_private; 2690 unsigned long irqflags; 2691 2692 if (!i915_pipe_enabled(dev, pipe)) 2693 return -EINVAL; 2694 2695 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2696 i915_enable_pipestat(dev_priv, pipe, 2697 PIPE_START_VBLANK_INTERRUPT_STATUS); 2698 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2699 2700 return 0; 2701 } 2702 2703 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2704 { 2705 struct drm_i915_private *dev_priv = dev->dev_private; 2706 unsigned long irqflags; 2707 2708 if (!i915_pipe_enabled(dev, pipe)) 2709 return -EINVAL; 2710 2711 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2712 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2713 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2714 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2715 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2716 return 0; 2717 } 2718 2719 /* Called from drm generic code, passed 'crtc' which 2720 * we use as a pipe index 2721 */ 2722 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2723 { 2724 struct drm_i915_private *dev_priv = dev->dev_private; 2725 unsigned long irqflags; 2726 2727 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2728 i915_disable_pipestat(dev_priv, pipe, 2729 PIPE_VBLANK_INTERRUPT_STATUS | 2730 PIPE_START_VBLANK_INTERRUPT_STATUS); 2731 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2732 } 2733 2734 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2735 { 2736 struct drm_i915_private *dev_priv = dev->dev_private; 2737 unsigned long irqflags; 2738 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2739 DE_PIPE_VBLANK(pipe); 2740 2741 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2742 ironlake_disable_display_irq(dev_priv, bit); 2743 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2744 } 2745 2746 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2747 { 2748 struct drm_i915_private *dev_priv = dev->dev_private; 2749 unsigned long irqflags; 2750 2751 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2752 i915_disable_pipestat(dev_priv, pipe, 2753 PIPE_START_VBLANK_INTERRUPT_STATUS); 2754 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2755 } 2756 2757 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2758 { 2759 struct drm_i915_private *dev_priv = dev->dev_private; 2760 unsigned long irqflags; 2761 2762 if (!i915_pipe_enabled(dev, pipe)) 2763 return; 2764 2765 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2766 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2767 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2768 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2769 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2770 } 2771 2772 static u32 2773 ring_last_seqno(struct intel_engine_cs *ring) 2774 { 2775 return list_entry(ring->request_list.prev, 2776 struct drm_i915_gem_request, list)->seqno; 2777 } 2778 2779 static bool 2780 ring_idle(struct intel_engine_cs *ring, u32 seqno) 2781 { 2782 return (list_empty(&ring->request_list) || 2783 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2784 } 2785 2786 static bool 2787 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2788 { 2789 if (INTEL_INFO(dev)->gen >= 8) { 2790 return (ipehr >> 23) == 0x1c; 2791 } else { 2792 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2793 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2794 MI_SEMAPHORE_REGISTER); 2795 } 2796 } 2797 2798 static struct intel_engine_cs * 2799 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 2800 { 2801 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2802 struct intel_engine_cs *signaller; 2803 int i; 2804 2805 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2806 for_each_ring(signaller, dev_priv, i) { 2807 if (ring == signaller) 2808 continue; 2809 2810 if (offset == signaller->semaphore.signal_ggtt[ring->id]) 2811 return signaller; 2812 } 2813 } else { 2814 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2815 2816 for_each_ring(signaller, dev_priv, i) { 2817 if(ring == signaller) 2818 continue; 2819 2820 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 2821 return signaller; 2822 } 2823 } 2824 2825 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 2826 ring->id, ipehr, offset); 2827 2828 return NULL; 2829 } 2830 2831 static struct intel_engine_cs * 2832 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 2833 { 2834 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2835 u32 cmd, ipehr, head; 2836 u64 offset = 0; 2837 int i, backwards; 2838 2839 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2840 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2841 return NULL; 2842 2843 /* 2844 * HEAD is likely pointing to the dword after the actual command, 2845 * so scan backwards until we find the MBOX. But limit it to just 3 2846 * or 4 dwords depending on the semaphore wait command size. 2847 * Note that we don't care about ACTHD here since that might 2848 * point at at batch, and semaphores are always emitted into the 2849 * ringbuffer itself. 
2850 */ 2851 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2852 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; 2853 2854 for (i = backwards; i; --i) { 2855 /* 2856 * Be paranoid and presume the hw has gone off into the wild - 2857 * our ring is smaller than what the hardware (and hence 2858 * HEAD_ADDR) allows. Also handles wrap-around. 2859 */ 2860 head &= ring->buffer->size - 1; 2861 2862 /* This here seems to blow up */ 2863 cmd = ioread32(ring->buffer->virtual_start + head); 2864 if (cmd == ipehr) 2865 break; 2866 2867 head -= 4; 2868 } 2869 2870 if (!i) 2871 return NULL; 2872 2873 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 2874 if (INTEL_INFO(ring->dev)->gen >= 8) { 2875 offset = ioread32(ring->buffer->virtual_start + head + 12); 2876 offset <<= 32; 2877 offset = ioread32(ring->buffer->virtual_start + head + 8); 2878 } 2879 return semaphore_wait_to_signaller_ring(ring, ipehr, offset); 2880 } 2881 2882 static int semaphore_passed(struct intel_engine_cs *ring) 2883 { 2884 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2885 struct intel_engine_cs *signaller; 2886 u32 seqno; 2887 2888 ring->hangcheck.deadlock++; 2889 2890 signaller = semaphore_waits_for(ring, &seqno); 2891 if (signaller == NULL) 2892 return -1; 2893 2894 /* Prevent pathological recursion due to driver bugs */ 2895 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2896 return -1; 2897 2898 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2899 return 1; 2900 2901 /* cursory check for an unkickable deadlock */ 2902 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 2903 semaphore_passed(signaller) < 0) 2904 return -1; 2905 2906 return 0; 2907 } 2908 2909 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2910 { 2911 struct intel_engine_cs *ring; 2912 int i; 2913 2914 for_each_ring(ring, dev_priv, i) 2915 ring->hangcheck.deadlock = 0; 2916 } 2917 2918 static enum intel_ring_hangcheck_action 2919 ring_stuck(struct intel_engine_cs *ring, u64 acthd) 2920 { 2921 struct drm_device *dev = ring->dev; 2922 struct drm_i915_private *dev_priv = dev->dev_private; 2923 u32 tmp; 2924 2925 if (acthd != ring->hangcheck.acthd) { 2926 if (acthd > ring->hangcheck.max_acthd) { 2927 ring->hangcheck.max_acthd = acthd; 2928 return HANGCHECK_ACTIVE; 2929 } 2930 2931 return HANGCHECK_ACTIVE_LOOP; 2932 } 2933 2934 if (IS_GEN2(dev)) 2935 return HANGCHECK_HUNG; 2936 2937 /* Is the chip hanging on a WAIT_FOR_EVENT? 2938 * If so we can simply poke the RB_WAIT bit 2939 * and break the hang. This should work on 2940 * all but the second generation chipsets. 2941 */ 2942 tmp = I915_READ_CTL(ring); 2943 if (tmp & RING_WAIT) { 2944 i915_handle_error(dev, false, 2945 "Kicking stuck wait on %s", 2946 ring->name); 2947 I915_WRITE_CTL(ring, tmp); 2948 return HANGCHECK_KICK; 2949 } 2950 2951 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2952 switch (semaphore_passed(ring)) { 2953 default: 2954 return HANGCHECK_HUNG; 2955 case 1: 2956 i915_handle_error(dev, false, 2957 "Kicking stuck semaphore on %s", 2958 ring->name); 2959 I915_WRITE_CTL(ring, tmp); 2960 return HANGCHECK_KICK; 2961 case 0: 2962 return HANGCHECK_WAIT; 2963 } 2964 } 2965 2966 return HANGCHECK_HUNG; 2967 } 2968 2969 /** 2970 * This is called when the chip hasn't reported back with completed 2971 * batchbuffers in a long time. We keep track per ring seqno progress and 2972 * if there are no progress, hangcheck score for that ring is increased. 
2973 * Further, acthd is inspected to see if the ring is stuck. On stuck case 2974 * we kick the ring. If we see no progress on three subsequent calls 2975 * we assume chip is wedged and try to fix it by resetting the chip. 2976 */ 2977 static void i915_hangcheck_elapsed(unsigned long data) 2978 { 2979 struct drm_device *dev = (struct drm_device *)data; 2980 struct drm_i915_private *dev_priv = dev->dev_private; 2981 struct intel_engine_cs *ring; 2982 int i; 2983 int busy_count = 0, rings_hung = 0; 2984 bool stuck[I915_NUM_RINGS] = { 0 }; 2985 #define BUSY 1 2986 #define KICK 5 2987 #define HUNG 20 2988 2989 if (!i915.enable_hangcheck) 2990 return; 2991 2992 for_each_ring(ring, dev_priv, i) { 2993 u64 acthd; 2994 u32 seqno; 2995 bool busy = true; 2996 2997 semaphore_clear_deadlocks(dev_priv); 2998 2999 seqno = ring->get_seqno(ring, false); 3000 acthd = intel_ring_get_active_head(ring); 3001 3002 if (ring->hangcheck.seqno == seqno) { 3003 if (ring_idle(ring, seqno)) { 3004 ring->hangcheck.action = HANGCHECK_IDLE; 3005 3006 if (waitqueue_active(&ring->irq_queue)) { 3007 /* Issue a wake-up to catch stuck h/w. */ 3008 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 3009 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 3010 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3011 ring->name); 3012 else 3013 DRM_INFO("Fake missed irq on %s\n", 3014 ring->name); 3015 wake_up_all(&ring->irq_queue); 3016 } 3017 /* Safeguard against driver failure */ 3018 ring->hangcheck.score += BUSY; 3019 } else 3020 busy = false; 3021 } else { 3022 /* We always increment the hangcheck score 3023 * if the ring is busy and still processing 3024 * the same request, so that no single request 3025 * can run indefinitely (such as a chain of 3026 * batches). The only time we do not increment 3027 * the hangcheck score on this ring, if this 3028 * ring is in a legitimate wait for another 3029 * ring. In that case the waiting ring is a 3030 * victim and we want to be sure we catch the 3031 * right culprit. Then every time we do kick 3032 * the ring, add a small increment to the 3033 * score so that we can catch a batch that is 3034 * being repeatedly kicked and so responsible 3035 * for stalling the machine. 3036 */ 3037 ring->hangcheck.action = ring_stuck(ring, 3038 acthd); 3039 3040 switch (ring->hangcheck.action) { 3041 case HANGCHECK_IDLE: 3042 case HANGCHECK_WAIT: 3043 case HANGCHECK_ACTIVE: 3044 break; 3045 case HANGCHECK_ACTIVE_LOOP: 3046 ring->hangcheck.score += BUSY; 3047 break; 3048 case HANGCHECK_KICK: 3049 ring->hangcheck.score += KICK; 3050 break; 3051 case HANGCHECK_HUNG: 3052 ring->hangcheck.score += HUNG; 3053 stuck[i] = true; 3054 break; 3055 } 3056 } 3057 } else { 3058 ring->hangcheck.action = HANGCHECK_ACTIVE; 3059 3060 /* Gradually reduce the count so that we catch DoS 3061 * attempts across multiple batches. 3062 */ 3063 if (ring->hangcheck.score > 0) 3064 ring->hangcheck.score--; 3065 3066 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 3067 } 3068 3069 ring->hangcheck.seqno = seqno; 3070 ring->hangcheck.acthd = acthd; 3071 busy_count += busy; 3072 } 3073 3074 for_each_ring(ring, dev_priv, i) { 3075 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3076 DRM_INFO("%s on %s\n", 3077 stuck[i] ? 
"stuck" : "no progress", 3078 ring->name); 3079 rings_hung++; 3080 } 3081 } 3082 3083 if (rings_hung) 3084 return i915_handle_error(dev, true, "Ring hung"); 3085 3086 if (busy_count) 3087 /* Reset timer case chip hangs without another request 3088 * being added */ 3089 i915_queue_hangcheck(dev); 3090 } 3091 3092 void i915_queue_hangcheck(struct drm_device *dev) 3093 { 3094 struct drm_i915_private *dev_priv = dev->dev_private; 3095 struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer; 3096 3097 if (!i915.enable_hangcheck) 3098 return; 3099 3100 /* Don't continually defer the hangcheck, but make sure it is active */ 3101 if (timer_pending(timer)) 3102 return; 3103 mod_timer(timer, 3104 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 3105 } 3106 3107 static void ibx_irq_reset(struct drm_device *dev) 3108 { 3109 struct drm_i915_private *dev_priv = dev->dev_private; 3110 3111 if (HAS_PCH_NOP(dev)) 3112 return; 3113 3114 GEN5_IRQ_RESET(SDE); 3115 3116 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3117 I915_WRITE(SERR_INT, 0xffffffff); 3118 } 3119 3120 /* 3121 * SDEIER is also touched by the interrupt handler to work around missed PCH 3122 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3123 * instead we unconditionally enable all PCH interrupt sources here, but then 3124 * only unmask them as needed with SDEIMR. 3125 * 3126 * This function needs to be called before interrupts are enabled. 3127 */ 3128 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3129 { 3130 struct drm_i915_private *dev_priv = dev->dev_private; 3131 3132 if (HAS_PCH_NOP(dev)) 3133 return; 3134 3135 WARN_ON(I915_READ(SDEIER) != 0); 3136 I915_WRITE(SDEIER, 0xffffffff); 3137 POSTING_READ(SDEIER); 3138 } 3139 3140 static void gen5_gt_irq_reset(struct drm_device *dev) 3141 { 3142 struct drm_i915_private *dev_priv = dev->dev_private; 3143 3144 GEN5_IRQ_RESET(GT); 3145 if (INTEL_INFO(dev)->gen >= 6) 3146 GEN5_IRQ_RESET(GEN6_PM); 3147 } 3148 3149 /* drm_dma.h hooks 3150 */ 3151 static void ironlake_irq_reset(struct drm_device *dev) 3152 { 3153 struct drm_i915_private *dev_priv = dev->dev_private; 3154 3155 I915_WRITE(HWSTAM, 0xffffffff); 3156 3157 GEN5_IRQ_RESET(DE); 3158 if (IS_GEN7(dev)) 3159 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3160 3161 gen5_gt_irq_reset(dev); 3162 3163 ibx_irq_reset(dev); 3164 } 3165 3166 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3167 { 3168 enum pipe pipe; 3169 3170 I915_WRITE(PORT_HOTPLUG_EN, 0); 3171 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3172 3173 for_each_pipe(dev_priv, pipe) 3174 I915_WRITE(PIPESTAT(pipe), 0xffff); 3175 3176 GEN5_IRQ_RESET(VLV_); 3177 } 3178 3179 static void valleyview_irq_preinstall(struct drm_device *dev) 3180 { 3181 struct drm_i915_private *dev_priv = dev->dev_private; 3182 3183 /* VLV magic */ 3184 I915_WRITE(VLV_IMR, 0); 3185 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3186 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3187 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3188 3189 gen5_gt_irq_reset(dev); 3190 3191 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3192 3193 vlv_display_irq_reset(dev_priv); 3194 } 3195 3196 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3197 { 3198 GEN8_IRQ_RESET_NDX(GT, 0); 3199 GEN8_IRQ_RESET_NDX(GT, 1); 3200 GEN8_IRQ_RESET_NDX(GT, 2); 3201 GEN8_IRQ_RESET_NDX(GT, 3); 3202 } 3203 3204 static void gen8_irq_reset(struct drm_device *dev) 3205 { 3206 struct drm_i915_private *dev_priv = dev->dev_private; 3207 int pipe; 3208 3209 
I915_WRITE(GEN8_MASTER_IRQ, 0); 3210 POSTING_READ(GEN8_MASTER_IRQ); 3211 3212 gen8_gt_irq_reset(dev_priv); 3213 3214 for_each_pipe(dev_priv, pipe) 3215 if (intel_display_power_is_enabled(dev_priv, 3216 POWER_DOMAIN_PIPE(pipe))) 3217 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3218 3219 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3220 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3221 GEN5_IRQ_RESET(GEN8_PCU_); 3222 3223 ibx_irq_reset(dev); 3224 } 3225 3226 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) 3227 { 3228 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3229 3230 spin_lock_irq(&dev_priv->irq_lock); 3231 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], 3232 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 3233 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], 3234 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 3235 spin_unlock_irq(&dev_priv->irq_lock); 3236 } 3237 3238 static void cherryview_irq_preinstall(struct drm_device *dev) 3239 { 3240 struct drm_i915_private *dev_priv = dev->dev_private; 3241 3242 I915_WRITE(GEN8_MASTER_IRQ, 0); 3243 POSTING_READ(GEN8_MASTER_IRQ); 3244 3245 gen8_gt_irq_reset(dev_priv); 3246 3247 GEN5_IRQ_RESET(GEN8_PCU_); 3248 3249 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3250 3251 vlv_display_irq_reset(dev_priv); 3252 } 3253 3254 static void ibx_hpd_irq_setup(struct drm_device *dev) 3255 { 3256 struct drm_i915_private *dev_priv = dev->dev_private; 3257 struct intel_encoder *intel_encoder; 3258 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3259 3260 if (HAS_PCH_IBX(dev)) { 3261 hotplug_irqs = SDE_HOTPLUG_MASK; 3262 for_each_intel_encoder(dev, intel_encoder) 3263 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3264 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3265 } else { 3266 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3267 for_each_intel_encoder(dev, intel_encoder) 3268 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3269 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3270 } 3271 3272 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3273 3274 /* 3275 * Enable digital hotplug on the PCH, and configure the DP short pulse 3276 * duration to 2ms (which is the minimum in the Display Port spec) 3277 * 3278 * This register is the same on all known PCH chips. 3279 */ 3280 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3281 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3282 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3283 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3284 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3285 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3286 } 3287 3288 static void ibx_irq_postinstall(struct drm_device *dev) 3289 { 3290 struct drm_i915_private *dev_priv = dev->dev_private; 3291 u32 mask; 3292 3293 if (HAS_PCH_NOP(dev)) 3294 return; 3295 3296 if (HAS_PCH_IBX(dev)) 3297 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3298 else 3299 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3300 3301 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3302 I915_WRITE(SDEIMR, ~mask); 3303 } 3304 3305 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3306 { 3307 struct drm_i915_private *dev_priv = dev->dev_private; 3308 u32 pm_irqs, gt_irqs; 3309 3310 pm_irqs = gt_irqs = 0; 3311 3312 dev_priv->gt_irq_mask = ~0; 3313 if (HAS_L3_DPF(dev)) { 3314 /* L3 parity interrupt is always unmasked. 
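		 * The error itself is picked up in the GT IRQ path and then
		 * processed from the L3 parity work item, so nothing more is
		 * needed here.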
*/ 3315 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3316 gt_irqs |= GT_PARITY_ERROR(dev); 3317 } 3318 3319 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3320 if (IS_GEN5(dev)) { 3321 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3322 ILK_BSD_USER_INTERRUPT; 3323 } else { 3324 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3325 } 3326 3327 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3328 3329 if (INTEL_INFO(dev)->gen >= 6) { 3330 /* 3331 * RPS interrupts will get enabled/disabled on demand when RPS 3332 * itself is enabled/disabled. 3333 */ 3334 if (HAS_VEBOX(dev)) 3335 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3336 3337 dev_priv->pm_irq_mask = 0xffffffff; 3338 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3339 } 3340 } 3341 3342 static int ironlake_irq_postinstall(struct drm_device *dev) 3343 { 3344 struct drm_i915_private *dev_priv = dev->dev_private; 3345 u32 display_mask, extra_mask; 3346 3347 if (INTEL_INFO(dev)->gen >= 7) { 3348 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3349 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3350 DE_PLANEB_FLIP_DONE_IVB | 3351 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3352 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3353 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3354 } else { 3355 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3356 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3357 DE_AUX_CHANNEL_A | 3358 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3359 DE_POISON); 3360 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3361 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3362 } 3363 3364 dev_priv->irq_mask = ~display_mask; 3365 3366 I915_WRITE(HWSTAM, 0xeffe); 3367 3368 ibx_irq_pre_postinstall(dev); 3369 3370 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3371 3372 gen5_gt_irq_postinstall(dev); 3373 3374 ibx_irq_postinstall(dev); 3375 3376 if (IS_IRONLAKE_M(dev)) { 3377 /* Enable PCU event interrupts 3378 * 3379 * spinlocking not required here for correctness since interrupt 3380 * setup is guaranteed to run in single-threaded context. But we 3381 * need it to make the assert_spin_locked happy. 
*/ 3382 spin_lock_irq(&dev_priv->irq_lock); 3383 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3384 spin_unlock_irq(&dev_priv->irq_lock); 3385 } 3386 3387 return 0; 3388 } 3389 3390 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3391 { 3392 u32 pipestat_mask; 3393 u32 iir_mask; 3394 enum pipe pipe; 3395 3396 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3397 PIPE_FIFO_UNDERRUN_STATUS; 3398 3399 for_each_pipe(dev_priv, pipe) 3400 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3401 POSTING_READ(PIPESTAT(PIPE_A)); 3402 3403 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3404 PIPE_CRC_DONE_INTERRUPT_STATUS; 3405 3406 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3407 for_each_pipe(dev_priv, pipe) 3408 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3409 3410 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3411 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3412 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3413 if (IS_CHERRYVIEW(dev_priv)) 3414 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3415 dev_priv->irq_mask &= ~iir_mask; 3416 3417 I915_WRITE(VLV_IIR, iir_mask); 3418 I915_WRITE(VLV_IIR, iir_mask); 3419 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3420 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3421 POSTING_READ(VLV_IMR); 3422 } 3423 3424 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3425 { 3426 u32 pipestat_mask; 3427 u32 iir_mask; 3428 enum pipe pipe; 3429 3430 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3431 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3432 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3433 if (IS_CHERRYVIEW(dev_priv)) 3434 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3435 3436 dev_priv->irq_mask |= iir_mask; 3437 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3438 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3439 I915_WRITE(VLV_IIR, iir_mask); 3440 I915_WRITE(VLV_IIR, iir_mask); 3441 POSTING_READ(VLV_IIR); 3442 3443 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3444 PIPE_CRC_DONE_INTERRUPT_STATUS; 3445 3446 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3447 for_each_pipe(dev_priv, pipe) 3448 i915_disable_pipestat(dev_priv, pipe, pipestat_mask); 3449 3450 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3451 PIPE_FIFO_UNDERRUN_STATUS; 3452 3453 for_each_pipe(dev_priv, pipe) 3454 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3455 POSTING_READ(PIPESTAT(PIPE_A)); 3456 } 3457 3458 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3459 { 3460 assert_spin_locked(&dev_priv->irq_lock); 3461 3462 if (dev_priv->display_irqs_enabled) 3463 return; 3464 3465 dev_priv->display_irqs_enabled = true; 3466 3467 if (intel_irqs_enabled(dev_priv)) 3468 valleyview_display_irqs_install(dev_priv); 3469 } 3470 3471 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3472 { 3473 assert_spin_locked(&dev_priv->irq_lock); 3474 3475 if (!dev_priv->display_irqs_enabled) 3476 return; 3477 3478 dev_priv->display_irqs_enabled = false; 3479 3480 if (intel_irqs_enabled(dev_priv)) 3481 valleyview_display_irqs_uninstall(dev_priv); 3482 } 3483 3484 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3485 { 3486 dev_priv->irq_mask = ~0; 3487 3488 I915_WRITE(PORT_HOTPLUG_EN, 0); 3489 POSTING_READ(PORT_HOTPLUG_EN); 3490 3491 I915_WRITE(VLV_IIR, 0xffffffff); 3492 I915_WRITE(VLV_IIR, 0xffffffff); 3493 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3494 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3495 POSTING_READ(VLV_IMR); 3496 3497 /* Interrupt setup is already guaranteed to be single-threaded, 
this is 3498 * just to make the assert_spin_locked check happy. */ 3499 spin_lock_irq(&dev_priv->irq_lock); 3500 if (dev_priv->display_irqs_enabled) 3501 valleyview_display_irqs_install(dev_priv); 3502 spin_unlock_irq(&dev_priv->irq_lock); 3503 } 3504 3505 static int valleyview_irq_postinstall(struct drm_device *dev) 3506 { 3507 struct drm_i915_private *dev_priv = dev->dev_private; 3508 3509 vlv_display_irq_postinstall(dev_priv); 3510 3511 gen5_gt_irq_postinstall(dev); 3512 3513 /* ack & enable invalid PTE error interrupts */ 3514 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3515 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3516 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3517 #endif 3518 3519 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3520 3521 return 0; 3522 } 3523 3524 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3525 { 3526 /* These are interrupts we'll toggle with the ring mask register */ 3527 uint32_t gt_interrupts[] = { 3528 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3529 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3530 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3531 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3532 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3533 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3534 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3535 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3536 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3537 0, 3538 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3539 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3540 }; 3541 3542 dev_priv->pm_irq_mask = 0xffffffff; 3543 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3544 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3545 /* 3546 * RPS interrupts will get enabled/disabled on demand when RPS itself 3547 * is enabled/disabled. 
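	 * Until then GT2 is initialised with everything masked in the IMR and
	 * nothing enabled in the IER (pm_irq_mask is all ones at this point).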
3548 */ 3549 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3550 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3551 } 3552 3553 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3554 { 3555 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3556 uint32_t de_pipe_enables; 3557 int pipe; 3558 u32 aux_en = GEN8_AUX_CHANNEL_A; 3559 3560 if (IS_GEN9(dev_priv)) { 3561 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3562 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3563 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3564 GEN9_AUX_CHANNEL_D; 3565 } else 3566 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3567 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3568 3569 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3570 GEN8_PIPE_FIFO_UNDERRUN; 3571 3572 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3573 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3574 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3575 3576 for_each_pipe(dev_priv, pipe) 3577 if (intel_display_power_is_enabled(dev_priv, 3578 POWER_DOMAIN_PIPE(pipe))) 3579 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3580 dev_priv->de_irq_mask[pipe], 3581 de_pipe_enables); 3582 3583 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en); 3584 } 3585 3586 static int gen8_irq_postinstall(struct drm_device *dev) 3587 { 3588 struct drm_i915_private *dev_priv = dev->dev_private; 3589 3590 ibx_irq_pre_postinstall(dev); 3591 3592 gen8_gt_irq_postinstall(dev_priv); 3593 gen8_de_irq_postinstall(dev_priv); 3594 3595 ibx_irq_postinstall(dev); 3596 3597 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3598 POSTING_READ(GEN8_MASTER_IRQ); 3599 3600 return 0; 3601 } 3602 3603 static int cherryview_irq_postinstall(struct drm_device *dev) 3604 { 3605 struct drm_i915_private *dev_priv = dev->dev_private; 3606 3607 vlv_display_irq_postinstall(dev_priv); 3608 3609 gen8_gt_irq_postinstall(dev_priv); 3610 3611 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3612 POSTING_READ(GEN8_MASTER_IRQ); 3613 3614 return 0; 3615 } 3616 3617 static void gen8_irq_uninstall(struct drm_device *dev) 3618 { 3619 struct drm_i915_private *dev_priv = dev->dev_private; 3620 3621 if (!dev_priv) 3622 return; 3623 3624 gen8_irq_reset(dev); 3625 } 3626 3627 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) 3628 { 3629 /* Interrupt setup is already guaranteed to be single-threaded, this is 3630 * just to make the assert_spin_locked check happy. 
*/ 3631 spin_lock_irq(&dev_priv->irq_lock); 3632 if (dev_priv->display_irqs_enabled) 3633 valleyview_display_irqs_uninstall(dev_priv); 3634 spin_unlock_irq(&dev_priv->irq_lock); 3635 3636 vlv_display_irq_reset(dev_priv); 3637 3638 dev_priv->irq_mask = ~0; 3639 } 3640 3641 static void valleyview_irq_uninstall(struct drm_device *dev) 3642 { 3643 struct drm_i915_private *dev_priv = dev->dev_private; 3644 3645 if (!dev_priv) 3646 return; 3647 3648 I915_WRITE(VLV_MASTER_IER, 0); 3649 3650 gen5_gt_irq_reset(dev); 3651 3652 I915_WRITE(HWSTAM, 0xffffffff); 3653 3654 vlv_display_irq_uninstall(dev_priv); 3655 } 3656 3657 static void cherryview_irq_uninstall(struct drm_device *dev) 3658 { 3659 struct drm_i915_private *dev_priv = dev->dev_private; 3660 3661 if (!dev_priv) 3662 return; 3663 3664 I915_WRITE(GEN8_MASTER_IRQ, 0); 3665 POSTING_READ(GEN8_MASTER_IRQ); 3666 3667 gen8_gt_irq_reset(dev_priv); 3668 3669 GEN5_IRQ_RESET(GEN8_PCU_); 3670 3671 vlv_display_irq_uninstall(dev_priv); 3672 } 3673 3674 static void ironlake_irq_uninstall(struct drm_device *dev) 3675 { 3676 struct drm_i915_private *dev_priv = dev->dev_private; 3677 3678 if (!dev_priv) 3679 return; 3680 3681 ironlake_irq_reset(dev); 3682 } 3683 3684 static void i8xx_irq_preinstall(struct drm_device * dev) 3685 { 3686 struct drm_i915_private *dev_priv = dev->dev_private; 3687 int pipe; 3688 3689 for_each_pipe(dev_priv, pipe) 3690 I915_WRITE(PIPESTAT(pipe), 0); 3691 I915_WRITE16(IMR, 0xffff); 3692 I915_WRITE16(IER, 0x0); 3693 POSTING_READ16(IER); 3694 } 3695 3696 static int i8xx_irq_postinstall(struct drm_device *dev) 3697 { 3698 struct drm_i915_private *dev_priv = dev->dev_private; 3699 3700 I915_WRITE16(EMR, 3701 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3702 3703 /* Unmask the interrupts that we always want on. */ 3704 dev_priv->irq_mask = 3705 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3706 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3707 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3708 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3709 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3710 I915_WRITE16(IMR, dev_priv->irq_mask); 3711 3712 I915_WRITE16(IER, 3713 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3714 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3715 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3716 I915_USER_INTERRUPT); 3717 POSTING_READ16(IER); 3718 3719 /* Interrupt setup is already guaranteed to be single-threaded, this is 3720 * just to make the assert_spin_locked check happy. */ 3721 spin_lock_irq(&dev_priv->irq_lock); 3722 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3723 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3724 spin_unlock_irq(&dev_priv->irq_lock); 3725 3726 return 0; 3727 } 3728 3729 /* 3730 * Returns true when a page flip has completed. 3731 */ 3732 static bool i8xx_handle_vblank(struct drm_device *dev, 3733 int plane, int pipe, u32 iir) 3734 { 3735 struct drm_i915_private *dev_priv = dev->dev_private; 3736 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3737 3738 if (!intel_pipe_handle_vblank(dev, pipe)) 3739 return false; 3740 3741 if ((iir & flip_pending) == 0) 3742 goto check_page_flip; 3743 3744 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3745 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3746 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3747 * the flip is completed (no longer pending). Since this doesn't raise 3748 * an interrupt per se, we watch for the change at vblank. 
3749 */ 3750 if (I915_READ16(ISR) & flip_pending) 3751 goto check_page_flip; 3752 3753 intel_prepare_page_flip(dev, plane); 3754 intel_finish_page_flip(dev, pipe); 3755 return true; 3756 3757 check_page_flip: 3758 intel_check_page_flip(dev, pipe); 3759 return false; 3760 } 3761 3762 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3763 { 3764 struct drm_device *dev = arg; 3765 struct drm_i915_private *dev_priv = dev->dev_private; 3766 u16 iir, new_iir; 3767 u32 pipe_stats[2]; 3768 int pipe; 3769 u16 flip_mask = 3770 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3771 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3772 3773 iir = I915_READ16(IIR); 3774 if (iir == 0) 3775 return IRQ_NONE; 3776 3777 while (iir & ~flip_mask) { 3778 /* Can't rely on pipestat interrupt bit in iir as it might 3779 * have been cleared after the pipestat interrupt was received. 3780 * It doesn't set the bit in iir again, but it still produces 3781 * interrupts (for non-MSI). 3782 */ 3783 spin_lock(&dev_priv->irq_lock); 3784 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3785 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3786 3787 for_each_pipe(dev_priv, pipe) { 3788 int reg = PIPESTAT(pipe); 3789 pipe_stats[pipe] = I915_READ(reg); 3790 3791 /* 3792 * Clear the PIPE*STAT regs before the IIR 3793 */ 3794 if (pipe_stats[pipe] & 0x8000ffff) 3795 I915_WRITE(reg, pipe_stats[pipe]); 3796 } 3797 spin_unlock(&dev_priv->irq_lock); 3798 3799 I915_WRITE16(IIR, iir & ~flip_mask); 3800 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3801 3802 if (iir & I915_USER_INTERRUPT) 3803 notify_ring(dev, &dev_priv->ring[RCS]); 3804 3805 for_each_pipe(dev_priv, pipe) { 3806 int plane = pipe; 3807 if (HAS_FBC(dev)) 3808 plane = !plane; 3809 3810 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3811 i8xx_handle_vblank(dev, plane, pipe, iir)) 3812 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3813 3814 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3815 i9xx_pipe_crc_irq_handler(dev, pipe); 3816 3817 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3818 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3819 pipe); 3820 } 3821 3822 iir = new_iir; 3823 } 3824 3825 return IRQ_HANDLED; 3826 } 3827 3828 static void i8xx_irq_uninstall(struct drm_device * dev) 3829 { 3830 struct drm_i915_private *dev_priv = dev->dev_private; 3831 int pipe; 3832 3833 for_each_pipe(dev_priv, pipe) { 3834 /* Clear enable bits; then clear status bits */ 3835 I915_WRITE(PIPESTAT(pipe), 0); 3836 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3837 } 3838 I915_WRITE16(IMR, 0xffff); 3839 I915_WRITE16(IER, 0x0); 3840 I915_WRITE16(IIR, I915_READ16(IIR)); 3841 } 3842 3843 static void i915_irq_preinstall(struct drm_device * dev) 3844 { 3845 struct drm_i915_private *dev_priv = dev->dev_private; 3846 int pipe; 3847 3848 if (I915_HAS_HOTPLUG(dev)) { 3849 I915_WRITE(PORT_HOTPLUG_EN, 0); 3850 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3851 } 3852 3853 I915_WRITE16(HWSTAM, 0xeffe); 3854 for_each_pipe(dev_priv, pipe) 3855 I915_WRITE(PIPESTAT(pipe), 0); 3856 I915_WRITE(IMR, 0xffffffff); 3857 I915_WRITE(IER, 0x0); 3858 POSTING_READ(IER); 3859 } 3860 3861 static int i915_irq_postinstall(struct drm_device *dev) 3862 { 3863 struct drm_i915_private *dev_priv = dev->dev_private; 3864 u32 enable_mask; 3865 3866 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3867 3868 /* Unmask the interrupts that we always want on. 
*/ 3869 dev_priv->irq_mask = 3870 ~(I915_ASLE_INTERRUPT | 3871 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3872 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3873 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3874 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3875 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3876 3877 enable_mask = 3878 I915_ASLE_INTERRUPT | 3879 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3880 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3881 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3882 I915_USER_INTERRUPT; 3883 3884 if (I915_HAS_HOTPLUG(dev)) { 3885 I915_WRITE(PORT_HOTPLUG_EN, 0); 3886 POSTING_READ(PORT_HOTPLUG_EN); 3887 3888 /* Enable in IER... */ 3889 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3890 /* and unmask in IMR */ 3891 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3892 } 3893 3894 I915_WRITE(IMR, dev_priv->irq_mask); 3895 I915_WRITE(IER, enable_mask); 3896 POSTING_READ(IER); 3897 3898 i915_enable_asle_pipestat(dev); 3899 3900 /* Interrupt setup is already guaranteed to be single-threaded, this is 3901 * just to make the assert_spin_locked check happy. */ 3902 spin_lock_irq(&dev_priv->irq_lock); 3903 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3904 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3905 spin_unlock_irq(&dev_priv->irq_lock); 3906 3907 return 0; 3908 } 3909 3910 /* 3911 * Returns true when a page flip has completed. 3912 */ 3913 static bool i915_handle_vblank(struct drm_device *dev, 3914 int plane, int pipe, u32 iir) 3915 { 3916 struct drm_i915_private *dev_priv = dev->dev_private; 3917 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3918 3919 if (!intel_pipe_handle_vblank(dev, pipe)) 3920 return false; 3921 3922 if ((iir & flip_pending) == 0) 3923 goto check_page_flip; 3924 3925 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3926 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3927 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3928 * the flip is completed (no longer pending). Since this doesn't raise 3929 * an interrupt per se, we watch for the change at vblank. 3930 */ 3931 if (I915_READ(ISR) & flip_pending) 3932 goto check_page_flip; 3933 3934 intel_prepare_page_flip(dev, plane); 3935 intel_finish_page_flip(dev, pipe); 3936 return true; 3937 3938 check_page_flip: 3939 intel_check_page_flip(dev, pipe); 3940 return false; 3941 } 3942 3943 static irqreturn_t i915_irq_handler(int irq, void *arg) 3944 { 3945 struct drm_device *dev = arg; 3946 struct drm_i915_private *dev_priv = dev->dev_private; 3947 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3948 u32 flip_mask = 3949 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3950 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3951 int pipe, ret = IRQ_NONE; 3952 3953 iir = I915_READ(IIR); 3954 do { 3955 bool irq_received = (iir & ~flip_mask) != 0; 3956 bool blc_event = false; 3957 3958 /* Can't rely on pipestat interrupt bit in iir as it might 3959 * have been cleared after the pipestat interrupt was received. 3960 * It doesn't set the bit in iir again, but it still produces 3961 * interrupts (for non-MSI). 
3962 */ 3963 spin_lock(&dev_priv->irq_lock); 3964 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3965 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3966 3967 for_each_pipe(dev_priv, pipe) { 3968 int reg = PIPESTAT(pipe); 3969 pipe_stats[pipe] = I915_READ(reg); 3970 3971 /* Clear the PIPE*STAT regs before the IIR */ 3972 if (pipe_stats[pipe] & 0x8000ffff) { 3973 I915_WRITE(reg, pipe_stats[pipe]); 3974 irq_received = true; 3975 } 3976 } 3977 spin_unlock(&dev_priv->irq_lock); 3978 3979 if (!irq_received) 3980 break; 3981 3982 /* Consume port. Then clear IIR or we'll miss events */ 3983 if (I915_HAS_HOTPLUG(dev) && 3984 iir & I915_DISPLAY_PORT_INTERRUPT) 3985 i9xx_hpd_irq_handler(dev); 3986 3987 I915_WRITE(IIR, iir & ~flip_mask); 3988 new_iir = I915_READ(IIR); /* Flush posted writes */ 3989 3990 if (iir & I915_USER_INTERRUPT) 3991 notify_ring(dev, &dev_priv->ring[RCS]); 3992 3993 for_each_pipe(dev_priv, pipe) { 3994 int plane = pipe; 3995 if (HAS_FBC(dev)) 3996 plane = !plane; 3997 3998 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3999 i915_handle_vblank(dev, plane, pipe, iir)) 4000 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4001 4002 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4003 blc_event = true; 4004 4005 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4006 i9xx_pipe_crc_irq_handler(dev, pipe); 4007 4008 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4009 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4010 pipe); 4011 } 4012 4013 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4014 intel_opregion_asle_intr(dev); 4015 4016 /* With MSI, interrupts are only generated when iir 4017 * transitions from zero to nonzero. If another bit got 4018 * set while we were handling the existing iir bits, then 4019 * we would never get another interrupt. 4020 * 4021 * This is fine on non-MSI as well, as if we hit this path 4022 * we avoid exiting the interrupt handler only to generate 4023 * another one. 4024 * 4025 * Note that for MSI this could cause a stray interrupt report 4026 * if an interrupt landed in the time between writing IIR and 4027 * the posting read. This should be rare enough to never 4028 * trigger the 99% of 100,000 interrupts test for disabling 4029 * stray interrupts. 
4030 */ 4031 ret = IRQ_HANDLED; 4032 iir = new_iir; 4033 } while (iir & ~flip_mask); 4034 4035 return ret; 4036 } 4037 4038 static void i915_irq_uninstall(struct drm_device * dev) 4039 { 4040 struct drm_i915_private *dev_priv = dev->dev_private; 4041 int pipe; 4042 4043 if (I915_HAS_HOTPLUG(dev)) { 4044 I915_WRITE(PORT_HOTPLUG_EN, 0); 4045 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4046 } 4047 4048 I915_WRITE16(HWSTAM, 0xffff); 4049 for_each_pipe(dev_priv, pipe) { 4050 /* Clear enable bits; then clear status bits */ 4051 I915_WRITE(PIPESTAT(pipe), 0); 4052 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4053 } 4054 I915_WRITE(IMR, 0xffffffff); 4055 I915_WRITE(IER, 0x0); 4056 4057 I915_WRITE(IIR, I915_READ(IIR)); 4058 } 4059 4060 static void i965_irq_preinstall(struct drm_device * dev) 4061 { 4062 struct drm_i915_private *dev_priv = dev->dev_private; 4063 int pipe; 4064 4065 I915_WRITE(PORT_HOTPLUG_EN, 0); 4066 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4067 4068 I915_WRITE(HWSTAM, 0xeffe); 4069 for_each_pipe(dev_priv, pipe) 4070 I915_WRITE(PIPESTAT(pipe), 0); 4071 I915_WRITE(IMR, 0xffffffff); 4072 I915_WRITE(IER, 0x0); 4073 POSTING_READ(IER); 4074 } 4075 4076 static int i965_irq_postinstall(struct drm_device *dev) 4077 { 4078 struct drm_i915_private *dev_priv = dev->dev_private; 4079 u32 enable_mask; 4080 u32 error_mask; 4081 4082 /* Unmask the interrupts that we always want on. */ 4083 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4084 I915_DISPLAY_PORT_INTERRUPT | 4085 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4086 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4087 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4088 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4089 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4090 4091 enable_mask = ~dev_priv->irq_mask; 4092 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4093 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4094 enable_mask |= I915_USER_INTERRUPT; 4095 4096 if (IS_G4X(dev)) 4097 enable_mask |= I915_BSD_USER_INTERRUPT; 4098 4099 /* Interrupt setup is already guaranteed to be single-threaded, this is 4100 * just to make the assert_spin_locked check happy. */ 4101 spin_lock_irq(&dev_priv->irq_lock); 4102 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4103 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4104 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4105 spin_unlock_irq(&dev_priv->irq_lock); 4106 4107 /* 4108 * Enable some error detection, note the instruction error mask 4109 * bit is reserved, so we leave it masked. 
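* A set bit in EMR masks that error source, so the values written below leave only the page table and memory refresh errors (plus the G4X privilege errors) unmasked.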
4110 */ 4111 if (IS_G4X(dev)) { 4112 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4113 GM45_ERROR_MEM_PRIV | 4114 GM45_ERROR_CP_PRIV | 4115 I915_ERROR_MEMORY_REFRESH); 4116 } else { 4117 error_mask = ~(I915_ERROR_PAGE_TABLE | 4118 I915_ERROR_MEMORY_REFRESH); 4119 } 4120 I915_WRITE(EMR, error_mask); 4121 4122 I915_WRITE(IMR, dev_priv->irq_mask); 4123 I915_WRITE(IER, enable_mask); 4124 POSTING_READ(IER); 4125 4126 I915_WRITE(PORT_HOTPLUG_EN, 0); 4127 POSTING_READ(PORT_HOTPLUG_EN); 4128 4129 i915_enable_asle_pipestat(dev); 4130 4131 return 0; 4132 } 4133 4134 static void i915_hpd_irq_setup(struct drm_device *dev) 4135 { 4136 struct drm_i915_private *dev_priv = dev->dev_private; 4137 struct intel_encoder *intel_encoder; 4138 u32 hotplug_en; 4139 4140 assert_spin_locked(&dev_priv->irq_lock); 4141 4142 if (I915_HAS_HOTPLUG(dev)) { 4143 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 4144 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 4145 /* Note HDMI and DP share hotplug bits */ 4146 /* enable bits are the same for all generations */ 4147 for_each_intel_encoder(dev, intel_encoder) 4148 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 4149 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 4150 /* Programming the CRT detection parameters tends 4151 to generate a spurious hotplug event about three 4152 seconds later. So just do it once. 4153 */ 4154 if (IS_G4X(dev)) 4155 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4156 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 4157 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4158 4159 /* Ignore TV since it's buggy */ 4160 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 4161 } 4162 } 4163 4164 static irqreturn_t i965_irq_handler(int irq, void *arg) 4165 { 4166 struct drm_device *dev = arg; 4167 struct drm_i915_private *dev_priv = dev->dev_private; 4168 u32 iir, new_iir; 4169 u32 pipe_stats[I915_MAX_PIPES]; 4170 int ret = IRQ_NONE, pipe; 4171 u32 flip_mask = 4172 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4173 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4174 4175 iir = I915_READ(IIR); 4176 4177 for (;;) { 4178 bool irq_received = (iir & ~flip_mask) != 0; 4179 bool blc_event = false; 4180 4181 /* Can't rely on pipestat interrupt bit in iir as it might 4182 * have been cleared after the pipestat interrupt was received. 4183 * It doesn't set the bit in iir again, but it still produces 4184 * interrupts (for non-MSI). 4185 */ 4186 spin_lock(&dev_priv->irq_lock); 4187 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4188 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4189 4190 for_each_pipe(dev_priv, pipe) { 4191 int reg = PIPESTAT(pipe); 4192 pipe_stats[pipe] = I915_READ(reg); 4193 4194 /* 4195 * Clear the PIPE*STAT regs before the IIR 4196 */ 4197 if (pipe_stats[pipe] & 0x8000ffff) { 4198 I915_WRITE(reg, pipe_stats[pipe]); 4199 irq_received = true; 4200 } 4201 } 4202 spin_unlock(&dev_priv->irq_lock); 4203 4204 if (!irq_received) 4205 break; 4206 4207 ret = IRQ_HANDLED; 4208 4209 /* Consume port. 
Then clear IIR or we'll miss events */ 4210 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4211 i9xx_hpd_irq_handler(dev); 4212 4213 I915_WRITE(IIR, iir & ~flip_mask); 4214 new_iir = I915_READ(IIR); /* Flush posted writes */ 4215 4216 if (iir & I915_USER_INTERRUPT) 4217 notify_ring(dev, &dev_priv->ring[RCS]); 4218 if (iir & I915_BSD_USER_INTERRUPT) 4219 notify_ring(dev, &dev_priv->ring[VCS]); 4220 4221 for_each_pipe(dev_priv, pipe) { 4222 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4223 i915_handle_vblank(dev, pipe, pipe, iir)) 4224 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4225 4226 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4227 blc_event = true; 4228 4229 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4230 i9xx_pipe_crc_irq_handler(dev, pipe); 4231 4232 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4233 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4234 } 4235 4236 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4237 intel_opregion_asle_intr(dev); 4238 4239 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4240 gmbus_irq_handler(dev); 4241 4242 /* With MSI, interrupts are only generated when iir 4243 * transitions from zero to nonzero. If another bit got 4244 * set while we were handling the existing iir bits, then 4245 * we would never get another interrupt. 4246 * 4247 * This is fine on non-MSI as well, as if we hit this path 4248 * we avoid exiting the interrupt handler only to generate 4249 * another one. 4250 * 4251 * Note that for MSI this could cause a stray interrupt report 4252 * if an interrupt landed in the time between writing IIR and 4253 * the posting read. This should be rare enough to never 4254 * trigger the 99% of 100,000 interrupts test for disabling 4255 * stray interrupts. 4256 */ 4257 iir = new_iir; 4258 } 4259 4260 return ret; 4261 } 4262 4263 static void i965_irq_uninstall(struct drm_device * dev) 4264 { 4265 struct drm_i915_private *dev_priv = dev->dev_private; 4266 int pipe; 4267 4268 if (!dev_priv) 4269 return; 4270 4271 I915_WRITE(PORT_HOTPLUG_EN, 0); 4272 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4273 4274 I915_WRITE(HWSTAM, 0xffffffff); 4275 for_each_pipe(dev_priv, pipe) 4276 I915_WRITE(PIPESTAT(pipe), 0); 4277 I915_WRITE(IMR, 0xffffffff); 4278 I915_WRITE(IER, 0x0); 4279 4280 for_each_pipe(dev_priv, pipe) 4281 I915_WRITE(PIPESTAT(pipe), 4282 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4283 I915_WRITE(IIR, I915_READ(IIR)); 4284 } 4285 4286 static void intel_hpd_irq_reenable_work(struct work_struct *work) 4287 { 4288 struct drm_i915_private *dev_priv = 4289 container_of(work, typeof(*dev_priv), 4290 hotplug_reenable_work.work); 4291 struct drm_device *dev = dev_priv->dev; 4292 struct drm_mode_config *mode_config = &dev->mode_config; 4293 int i; 4294 4295 intel_runtime_pm_get(dev_priv); 4296 4297 spin_lock_irq(&dev_priv->irq_lock); 4298 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4299 struct drm_connector *connector; 4300 4301 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 4302 continue; 4303 4304 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4305 4306 list_for_each_entry(connector, &mode_config->connector_list, head) { 4307 struct intel_connector *intel_connector = to_intel_connector(connector); 4308 4309 if (intel_connector->encoder->hpd_pin == i) { 4310 if (connector->polled != intel_connector->polled) 4311 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 4312 connector->name); 4313 connector->polled = intel_connector->polled; 4314 if (!connector->polled) 4315 connector->polled = 
DRM_CONNECTOR_POLL_HPD; 4316 } 4317 } 4318 } 4319 if (dev_priv->display.hpd_irq_setup) 4320 dev_priv->display.hpd_irq_setup(dev); 4321 spin_unlock_irq(&dev_priv->irq_lock); 4322 4323 intel_runtime_pm_put(dev_priv); 4324 } 4325 4326 /** 4327 * intel_irq_init - initializes irq support 4328 * @dev_priv: i915 device instance 4329 * 4330 * This function initializes all the irq support including work items, timers 4331 * and all the vtables. It does not set up the interrupt itself though. 4332 */ 4333 void intel_irq_init(struct drm_i915_private *dev_priv) 4334 { 4335 struct drm_device *dev = dev_priv->dev; 4336 4337 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 4338 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); 4339 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 4340 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4341 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4342 4343 /* Let's track the enabled rps events */ 4344 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 4345 /* WaGsvRC0ResidencyMethod:vlv */ 4346 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4347 else 4348 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4349 4350 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4351 i915_hangcheck_elapsed, 4352 (unsigned long) dev); 4353 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, 4354 intel_hpd_irq_reenable_work); 4355 4356 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4357 4358 if (IS_GEN2(dev_priv)) { 4359 dev->max_vblank_count = 0; 4360 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4361 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 4362 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4363 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 4364 } else { 4365 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4366 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4367 } 4368 4369 /* 4370 * Opt out of the vblank disable timer on everything except gen2. 4371 * Gen2 doesn't have a hardware frame counter and so depends on 4372 * vblank interrupts to produce sane vblank sequence numbers.
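* (This is also why dev->max_vblank_count is set to zero for gen2 above: there is no hardware frame counter to read.)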
4373 */ 4374 if (!IS_GEN2(dev_priv)) 4375 dev->vblank_disable_immediate = true; 4376 4377 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 4378 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4379 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4380 } 4381 4382 if (IS_CHERRYVIEW(dev_priv)) { 4383 dev->driver->irq_handler = cherryview_irq_handler; 4384 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4385 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4386 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4387 dev->driver->enable_vblank = valleyview_enable_vblank; 4388 dev->driver->disable_vblank = valleyview_disable_vblank; 4389 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4390 } else if (IS_VALLEYVIEW(dev_priv)) { 4391 dev->driver->irq_handler = valleyview_irq_handler; 4392 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4393 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4394 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4395 dev->driver->enable_vblank = valleyview_enable_vblank; 4396 dev->driver->disable_vblank = valleyview_disable_vblank; 4397 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4398 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4399 dev->driver->irq_handler = gen8_irq_handler; 4400 dev->driver->irq_preinstall = gen8_irq_reset; 4401 dev->driver->irq_postinstall = gen8_irq_postinstall; 4402 dev->driver->irq_uninstall = gen8_irq_uninstall; 4403 dev->driver->enable_vblank = gen8_enable_vblank; 4404 dev->driver->disable_vblank = gen8_disable_vblank; 4405 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4406 } else if (HAS_PCH_SPLIT(dev)) { 4407 dev->driver->irq_handler = ironlake_irq_handler; 4408 dev->driver->irq_preinstall = ironlake_irq_reset; 4409 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4410 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4411 dev->driver->enable_vblank = ironlake_enable_vblank; 4412 dev->driver->disable_vblank = ironlake_disable_vblank; 4413 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4414 } else { 4415 if (INTEL_INFO(dev_priv)->gen == 2) { 4416 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4417 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4418 dev->driver->irq_handler = i8xx_irq_handler; 4419 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4420 } else if (INTEL_INFO(dev_priv)->gen == 3) { 4421 dev->driver->irq_preinstall = i915_irq_preinstall; 4422 dev->driver->irq_postinstall = i915_irq_postinstall; 4423 dev->driver->irq_uninstall = i915_irq_uninstall; 4424 dev->driver->irq_handler = i915_irq_handler; 4425 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4426 } else { 4427 dev->driver->irq_preinstall = i965_irq_preinstall; 4428 dev->driver->irq_postinstall = i965_irq_postinstall; 4429 dev->driver->irq_uninstall = i965_irq_uninstall; 4430 dev->driver->irq_handler = i965_irq_handler; 4431 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4432 } 4433 dev->driver->enable_vblank = i915_enable_vblank; 4434 dev->driver->disable_vblank = i915_disable_vblank; 4435 } 4436 } 4437 4438 /** 4439 * intel_hpd_init - initializes and enables hpd support 4440 * @dev_priv: i915 device instance 4441 * 4442 * This function enables the hotplug support. It requires that interrupts have 4443 * already been enabled with intel_irq_install(). From this point on hotplug and 4444 * poll requests can run concurrently with other code, so locking rules must be 4445 * obeyed.
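* (A short bring-up sketch illustrating this ordering is appended at the end of this file.)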
4446 * 4447 * This is a separate step from interrupt enabling to simplify the locking rules 4448 * in the driver load and resume code. 4449 */ 4450 void intel_hpd_init(struct drm_i915_private *dev_priv) 4451 { 4452 struct drm_device *dev = dev_priv->dev; 4453 struct drm_mode_config *mode_config = &dev->mode_config; 4454 struct drm_connector *connector; 4455 int i; 4456 4457 for (i = 1; i < HPD_NUM_PINS; i++) { 4458 dev_priv->hpd_stats[i].hpd_cnt = 0; 4459 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4460 } 4461 list_for_each_entry(connector, &mode_config->connector_list, head) { 4462 struct intel_connector *intel_connector = to_intel_connector(connector); 4463 connector->polled = intel_connector->polled; 4464 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 4465 connector->polled = DRM_CONNECTOR_POLL_HPD; 4466 if (intel_connector->mst_port) 4467 connector->polled = DRM_CONNECTOR_POLL_HPD; 4468 } 4469 4470 /* Interrupt setup is already guaranteed to be single-threaded, this is 4471 * just to make the assert_spin_locked checks happy. */ 4472 spin_lock_irq(&dev_priv->irq_lock); 4473 if (dev_priv->display.hpd_irq_setup) 4474 dev_priv->display.hpd_irq_setup(dev); 4475 spin_unlock_irq(&dev_priv->irq_lock); 4476 } 4477 4478 /** 4479 * intel_irq_install - enables the hardware interrupt 4480 * @dev_priv: i915 device instance 4481 * 4482 * This function enables the hardware interrupt handling, but leaves the hotplug 4483 * handling still disabled. It is called after intel_irq_init(). 4484 * 4485 * In the driver load and resume code we need working interrupts in a few places 4486 * but don't want to deal with the hassle of concurrent probe and hotplug 4487 * workers. Hence the split into this two-stage approach. 4488 */ 4489 int intel_irq_install(struct drm_i915_private *dev_priv) 4490 { 4491 /* 4492 * We enable some interrupt sources in our postinstall hooks, so mark 4493 * interrupts as enabled _before_ actually enabling them to avoid 4494 * special cases in our ordering checks. 4495 */ 4496 dev_priv->pm.irqs_enabled = true; 4497 4498 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); 4499 } 4500 4501 /** 4502 * intel_irq_uninstall - finalizes all irq handling 4503 * @dev_priv: i915 device instance 4504 * 4505 * This stops interrupt and hotplug handling and unregisters and frees all 4506 * resources acquired in the init functions. 4507 */ 4508 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4509 { 4510 drm_irq_uninstall(dev_priv->dev); 4511 intel_hpd_cancel_work(dev_priv); 4512 dev_priv->pm.irqs_enabled = false; 4513 } 4514 4515 /** 4516 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4517 * @dev_priv: i915 device instance 4518 * 4519 * This function is used to disable interrupts at runtime, both in the runtime 4520 * pm and the system suspend/resume code. 4521 */ 4522 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4523 { 4524 dev_priv->dev->driver->irq_uninstall(dev_priv->dev); 4525 dev_priv->pm.irqs_enabled = false; 4526 } 4527 4528 /** 4529 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4530 * @dev_priv: i915 device instance 4531 * 4532 * This function is used to enable interrupts at runtime, both in the runtime 4533 * pm and the system suspend/resume code.
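* It marks interrupts as enabled again and then re-runs the driver's irq_preinstall and irq_postinstall hooks.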
4534 */ 4535 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4536 { 4537 dev_priv->pm.irqs_enabled = true; 4538 dev_priv->dev->driver->irq_preinstall(dev_priv->dev); 4539 dev_priv->dev->driver->irq_postinstall(dev_priv->dev); 4540 } 4541
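/*
 * Editor's addition: the kerneldoc above describes a deliberate two-stage
 * bring-up - intel_irq_init() for pure software state, intel_irq_install()
 * for the hardware interrupt, and only then intel_hpd_init() for hotplug.
 * The block below is a minimal, non-compiled sketch of a caller following
 * that order; example_irq_and_hpd_bringup() is a hypothetical name used
 * purely for illustration and is not part of the driver.
 */
#if 0
static int example_irq_and_hpd_bringup(struct drm_i915_private *dev_priv)
{
        int ret;

        /* Stage 1: work items, timers and irq vtables; no hardware access. */
        intel_irq_init(dev_priv);

        /* Stage 2: enable the hardware interrupt; hotplug stays disabled. */
        ret = intel_irq_install(dev_priv);
        if (ret)
                return ret;

        /* Stage 3: with interrupts live, enable hotplug support. */
        intel_hpd_init(dev_priv);

        return 0;
}
#endif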