1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <drm/drmP.h> 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include "intel_drv.h" 38 39 static const u32 hpd_ibx[] = { 40 [HPD_CRT] = SDE_CRT_HOTPLUG, 41 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 42 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 43 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 44 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 45 }; 46 47 static const u32 hpd_cpt[] = { 48 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 49 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 50 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 51 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 52 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 53 }; 54 55 static const u32 hpd_mask_i915[] = { 56 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 57 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 58 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 59 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 60 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 61 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 62 }; 63 64 static const u32 hpd_status_gen4[] = { 65 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 66 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 67 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 68 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 69 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 71 }; 72 73 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ 74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 76 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 77 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 78 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 80 }; 81 82 /* For display hotplug interrupt */ 83 static void 84 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 85 { 86 assert_spin_locked(&dev_priv->irq_lock); 87 88 if (dev_priv->pc8.irqs_disabled) { 89 WARN(1, "IRQs disabled\n"); 90 dev_priv->pc8.regsave.deimr &= ~mask; 91 return; 92 } 93 94 if ((dev_priv->irq_mask & mask) != 0) { 95 dev_priv->irq_mask &= ~mask; 96 I915_WRITE(DEIMR, dev_priv->irq_mask); 97 POSTING_READ(DEIMR); 98 } 99 } 100 101 static void 102 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 103 { 104 
assert_spin_locked(&dev_priv->irq_lock); 105 106 if (dev_priv->pc8.irqs_disabled) { 107 WARN(1, "IRQs disabled\n"); 108 dev_priv->pc8.regsave.deimr |= mask; 109 return; 110 } 111 112 if ((dev_priv->irq_mask & mask) != mask) { 113 dev_priv->irq_mask |= mask; 114 I915_WRITE(DEIMR, dev_priv->irq_mask); 115 POSTING_READ(DEIMR); 116 } 117 } 118 119 /** 120 * ilk_update_gt_irq - update GTIMR 121 * @dev_priv: driver private 122 * @interrupt_mask: mask of interrupt bits to update 123 * @enabled_irq_mask: mask of interrupt bits to enable 124 */ 125 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 126 uint32_t interrupt_mask, 127 uint32_t enabled_irq_mask) 128 { 129 assert_spin_locked(&dev_priv->irq_lock); 130 131 if (dev_priv->pc8.irqs_disabled) { 132 WARN(1, "IRQs disabled\n"); 133 dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; 134 dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & 135 interrupt_mask); 136 return; 137 } 138 139 dev_priv->gt_irq_mask &= ~interrupt_mask; 140 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 141 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 142 POSTING_READ(GTIMR); 143 } 144 145 void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 146 { 147 ilk_update_gt_irq(dev_priv, mask, mask); 148 } 149 150 void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 151 { 152 ilk_update_gt_irq(dev_priv, mask, 0); 153 } 154 155 /** 156 * snb_update_pm_irq - update GEN6_PMIMR 157 * @dev_priv: driver private 158 * @interrupt_mask: mask of interrupt bits to update 159 * @enabled_irq_mask: mask of interrupt bits to enable 160 */ 161 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 162 uint32_t interrupt_mask, 163 uint32_t enabled_irq_mask) 164 { 165 uint32_t new_val; 166 167 assert_spin_locked(&dev_priv->irq_lock); 168 169 if (dev_priv->pc8.irqs_disabled) { 170 WARN(1, "IRQs disabled\n"); 171 dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; 172 dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & 173 interrupt_mask); 174 return; 175 } 176 177 new_val = dev_priv->pm_irq_mask; 178 new_val &= ~interrupt_mask; 179 new_val |= (~enabled_irq_mask & interrupt_mask); 180 181 if (new_val != dev_priv->pm_irq_mask) { 182 dev_priv->pm_irq_mask = new_val; 183 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 184 POSTING_READ(GEN6_PMIMR); 185 } 186 } 187 188 void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 189 { 190 snb_update_pm_irq(dev_priv, mask, mask); 191 } 192 193 void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 194 { 195 snb_update_pm_irq(dev_priv, mask, 0); 196 } 197 198 static bool ivb_can_enable_err_int(struct drm_device *dev) 199 { 200 struct drm_i915_private *dev_priv = dev->dev_private; 201 struct intel_crtc *crtc; 202 enum pipe pipe; 203 204 assert_spin_locked(&dev_priv->irq_lock); 205 206 for_each_pipe(pipe) { 207 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 208 209 if (crtc->cpu_fifo_underrun_disabled) 210 return false; 211 } 212 213 return true; 214 } 215 216 static bool cpt_can_enable_serr_int(struct drm_device *dev) 217 { 218 struct drm_i915_private *dev_priv = dev->dev_private; 219 enum pipe pipe; 220 struct intel_crtc *crtc; 221 222 assert_spin_locked(&dev_priv->irq_lock); 223 224 for_each_pipe(pipe) { 225 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 226 227 if (crtc->pch_fifo_underrun_disabled) 228 return false; 229 } 230 231 return true; 232 } 233 234 static void ironlake_set_fifo_underrun_reporting(struct drm_device 
*dev, 235 enum pipe pipe, bool enable) 236 { 237 struct drm_i915_private *dev_priv = dev->dev_private; 238 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : 239 DE_PIPEB_FIFO_UNDERRUN; 240 241 if (enable) 242 ironlake_enable_display_irq(dev_priv, bit); 243 else 244 ironlake_disable_display_irq(dev_priv, bit); 245 } 246 247 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 248 enum pipe pipe, bool enable) 249 { 250 struct drm_i915_private *dev_priv = dev->dev_private; 251 if (enable) { 252 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 253 254 if (!ivb_can_enable_err_int(dev)) 255 return; 256 257 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 258 } else { 259 bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB); 260 261 /* Change the state _after_ we've read out the current one. */ 262 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 263 264 if (!was_enabled && 265 (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) { 266 DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n", 267 pipe_name(pipe)); 268 } 269 } 270 } 271 272 /** 273 * ibx_display_interrupt_update - update SDEIMR 274 * @dev_priv: driver private 275 * @interrupt_mask: mask of interrupt bits to update 276 * @enabled_irq_mask: mask of interrupt bits to enable 277 */ 278 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 279 uint32_t interrupt_mask, 280 uint32_t enabled_irq_mask) 281 { 282 uint32_t sdeimr = I915_READ(SDEIMR); 283 sdeimr &= ~interrupt_mask; 284 sdeimr |= (~enabled_irq_mask & interrupt_mask); 285 286 assert_spin_locked(&dev_priv->irq_lock); 287 288 if (dev_priv->pc8.irqs_disabled && 289 (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { 290 WARN(1, "IRQs disabled\n"); 291 dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; 292 dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask & 293 interrupt_mask); 294 return; 295 } 296 297 I915_WRITE(SDEIMR, sdeimr); 298 POSTING_READ(SDEIMR); 299 } 300 #define ibx_enable_display_interrupt(dev_priv, bits) \ 301 ibx_display_interrupt_update((dev_priv), (bits), (bits)) 302 #define ibx_disable_display_interrupt(dev_priv, bits) \ 303 ibx_display_interrupt_update((dev_priv), (bits), 0) 304 305 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, 306 enum transcoder pch_transcoder, 307 bool enable) 308 { 309 struct drm_i915_private *dev_priv = dev->dev_private; 310 uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 311 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; 312 313 if (enable) 314 ibx_enable_display_interrupt(dev_priv, bit); 315 else 316 ibx_disable_display_interrupt(dev_priv, bit); 317 } 318 319 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, 320 enum transcoder pch_transcoder, 321 bool enable) 322 { 323 struct drm_i915_private *dev_priv = dev->dev_private; 324 325 if (enable) { 326 I915_WRITE(SERR_INT, 327 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); 328 329 if (!cpt_can_enable_serr_int(dev)) 330 return; 331 332 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); 333 } else { 334 uint32_t tmp = I915_READ(SERR_INT); 335 bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT); 336 337 /* Change the state _after_ we've read out the current one. 
*/ 338 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); 339 340 if (!was_enabled && 341 (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) { 342 DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n", 343 transcoder_name(pch_transcoder)); 344 } 345 } 346 } 347 348 /** 349 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages 350 * @dev: drm device 351 * @pipe: pipe 352 * @enable: true if we want to report FIFO underrun errors, false otherwise 353 * 354 * This function makes us disable or enable CPU fifo underruns for a specific 355 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun 356 * reporting for one pipe may also disable all the other CPU error interruts for 357 * the other pipes, due to the fact that there's just one interrupt mask/enable 358 * bit for all the pipes. 359 * 360 * Returns the previous state of underrun reporting. 361 */ 362 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 363 enum pipe pipe, bool enable) 364 { 365 struct drm_i915_private *dev_priv = dev->dev_private; 366 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 367 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 368 unsigned long flags; 369 bool ret; 370 371 spin_lock_irqsave(&dev_priv->irq_lock, flags); 372 373 ret = !intel_crtc->cpu_fifo_underrun_disabled; 374 375 if (enable == ret) 376 goto done; 377 378 intel_crtc->cpu_fifo_underrun_disabled = !enable; 379 380 if (IS_GEN5(dev) || IS_GEN6(dev)) 381 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 382 else if (IS_GEN7(dev)) 383 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable); 384 385 done: 386 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 387 return ret; 388 } 389 390 /** 391 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages 392 * @dev: drm device 393 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) 394 * @enable: true if we want to report FIFO underrun errors, false otherwise 395 * 396 * This function makes us disable or enable PCH fifo underruns for a specific 397 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO 398 * underrun reporting for one transcoder may also disable all the other PCH 399 * error interruts for the other transcoders, due to the fact that there's just 400 * one interrupt mask/enable bit for all the transcoders. 401 * 402 * Returns the previous state of underrun reporting. 403 */ 404 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 405 enum transcoder pch_transcoder, 406 bool enable) 407 { 408 struct drm_i915_private *dev_priv = dev->dev_private; 409 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; 410 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 411 unsigned long flags; 412 bool ret; 413 414 /* 415 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT 416 * has only one pch transcoder A that all pipes can use. To avoid racy 417 * pch transcoder -> pipe lookups from interrupt code simply store the 418 * underrun statistics in crtc A. Since we never expose this anywhere 419 * nor use it outside of the fifo underrun code here using the "wrong" 420 * crtc on LPT won't cause issues. 
421 */ 422 423 spin_lock_irqsave(&dev_priv->irq_lock, flags); 424 425 ret = !intel_crtc->pch_fifo_underrun_disabled; 426 427 if (enable == ret) 428 goto done; 429 430 intel_crtc->pch_fifo_underrun_disabled = !enable; 431 432 if (HAS_PCH_IBX(dev)) 433 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 434 else 435 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 436 437 done: 438 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 439 return ret; 440 } 441 442 443 void 444 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 445 { 446 u32 reg = PIPESTAT(pipe); 447 u32 pipestat = I915_READ(reg) & 0x7fff0000; 448 449 assert_spin_locked(&dev_priv->irq_lock); 450 451 if ((pipestat & mask) == mask) 452 return; 453 454 /* Enable the interrupt, clear any pending status */ 455 pipestat |= mask | (mask >> 16); 456 I915_WRITE(reg, pipestat); 457 POSTING_READ(reg); 458 } 459 460 void 461 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 462 { 463 u32 reg = PIPESTAT(pipe); 464 u32 pipestat = I915_READ(reg) & 0x7fff0000; 465 466 assert_spin_locked(&dev_priv->irq_lock); 467 468 if ((pipestat & mask) == 0) 469 return; 470 471 pipestat &= ~mask; 472 I915_WRITE(reg, pipestat); 473 POSTING_READ(reg); 474 } 475 476 /** 477 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 478 */ 479 static void i915_enable_asle_pipestat(struct drm_device *dev) 480 { 481 drm_i915_private_t *dev_priv = dev->dev_private; 482 unsigned long irqflags; 483 484 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 485 return; 486 487 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 488 489 i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); 490 if (INTEL_INFO(dev)->gen >= 4) 491 i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); 492 493 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 494 } 495 496 /** 497 * i915_pipe_enabled - check if a pipe is enabled 498 * @dev: DRM device 499 * @pipe: pipe to check 500 * 501 * Reading certain registers when the pipe is disabled can hang the chip. 502 * Use this routine to make sure the PLL is running and the pipe is active 503 * before reading such registers if unsure. 504 */ 505 static int 506 i915_pipe_enabled(struct drm_device *dev, int pipe) 507 { 508 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 509 510 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 511 /* Locking is horribly broken here, but whatever. */ 512 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 513 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 514 515 return intel_crtc->active; 516 } else { 517 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 518 } 519 } 520 521 /* Called from drm generic code, passed a 'crtc', which 522 * we use as a pipe index 523 */ 524 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 525 { 526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 527 unsigned long high_frame; 528 unsigned long low_frame; 529 u32 high1, high2, low; 530 531 if (!i915_pipe_enabled(dev, pipe)) { 532 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 533 "pipe %c\n", pipe_name(pipe)); 534 return 0; 535 } 536 537 high_frame = PIPEFRAME(pipe); 538 low_frame = PIPEFRAMEPIXEL(pipe); 539 540 /* 541 * High & low register fields aren't synchronized, so make sure 542 * we get a low value that's stable across two reads of the high 543 * register. 
544 */ 545 do { 546 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 547 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; 548 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 549 } while (high1 != high2); 550 551 high1 >>= PIPE_FRAME_HIGH_SHIFT; 552 low >>= PIPE_FRAME_LOW_SHIFT; 553 return (high1 << 8) | low; 554 } 555 556 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 557 { 558 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 559 int reg = PIPE_FRMCOUNT_GM45(pipe); 560 561 if (!i915_pipe_enabled(dev, pipe)) { 562 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 563 "pipe %c\n", pipe_name(pipe)); 564 return 0; 565 } 566 567 return I915_READ(reg); 568 } 569 570 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 571 int *vpos, int *hpos) 572 { 573 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 574 u32 vbl = 0, position = 0; 575 int vbl_start, vbl_end, htotal, vtotal; 576 bool in_vbl = true; 577 int ret = 0; 578 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 579 pipe); 580 581 if (!i915_pipe_enabled(dev, pipe)) { 582 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 583 "pipe %c\n", pipe_name(pipe)); 584 return 0; 585 } 586 587 /* Get vtotal. */ 588 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 589 590 if (INTEL_INFO(dev)->gen >= 4) { 591 /* No obvious pixelcount register. Only query vertical 592 * scanout position from Display scan line register. 593 */ 594 position = I915_READ(PIPEDSL(pipe)); 595 596 /* Decode into vertical scanout position. Don't have 597 * horizontal scanout position. 598 */ 599 *vpos = position & 0x1fff; 600 *hpos = 0; 601 } else { 602 /* Have access to pixelcount since start of frame. 603 * We can split this into vertical and horizontal 604 * scanout position. 605 */ 606 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 607 608 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 609 *vpos = position / htotal; 610 *hpos = position - (*vpos * htotal); 611 } 612 613 /* Query vblank area. */ 614 vbl = I915_READ(VBLANK(cpu_transcoder)); 615 616 /* Test position against vblank region. */ 617 vbl_start = vbl & 0x1fff; 618 vbl_end = (vbl >> 16) & 0x1fff; 619 620 if ((*vpos < vbl_start) || (*vpos > vbl_end)) 621 in_vbl = false; 622 623 /* Inside "upper part" of vblank area? Apply corrective offset: */ 624 if (in_vbl && (*vpos >= vbl_start)) 625 *vpos = *vpos - vtotal; 626 627 /* Readouts valid? */ 628 if (vbl > 0) 629 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 630 631 /* In vblank? 
*/ 632 if (in_vbl) 633 ret |= DRM_SCANOUTPOS_INVBL; 634 635 return ret; 636 } 637 638 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 639 int *max_error, 640 struct timeval *vblank_time, 641 unsigned flags) 642 { 643 struct drm_crtc *crtc; 644 645 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 646 DRM_ERROR("Invalid crtc %d\n", pipe); 647 return -EINVAL; 648 } 649 650 /* Get drm_crtc to timestamp: */ 651 crtc = intel_get_crtc_for_pipe(dev, pipe); 652 if (crtc == NULL) { 653 DRM_ERROR("Invalid crtc %d\n", pipe); 654 return -EINVAL; 655 } 656 657 if (!crtc->enabled) { 658 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 659 return -EBUSY; 660 } 661 662 /* Helper routine in DRM core does all the work: */ 663 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 664 vblank_time, flags, 665 crtc); 666 } 667 668 static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector) 669 { 670 enum drm_connector_status old_status; 671 672 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 673 old_status = connector->status; 674 675 connector->status = connector->funcs->detect(connector, false); 676 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 677 connector->base.id, 678 drm_get_connector_name(connector), 679 old_status, connector->status); 680 return (old_status != connector->status); 681 } 682 683 /* 684 * Handle hotplug events outside the interrupt handler proper. 685 */ 686 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 687 688 static void i915_hotplug_work_func(struct work_struct *work) 689 { 690 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 691 hotplug_work); 692 struct drm_device *dev = dev_priv->dev; 693 struct drm_mode_config *mode_config = &dev->mode_config; 694 struct intel_connector *intel_connector; 695 struct intel_encoder *intel_encoder; 696 struct drm_connector *connector; 697 unsigned long irqflags; 698 bool hpd_disabled = false; 699 bool changed = false; 700 u32 hpd_event_bits; 701 702 /* HPD irq before everything is fully set up. 
*/ 703 if (!dev_priv->enable_hotplug_processing) 704 return; 705 706 mutex_lock(&mode_config->mutex); 707 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 708 709 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 710 711 hpd_event_bits = dev_priv->hpd_event_bits; 712 dev_priv->hpd_event_bits = 0; 713 list_for_each_entry(connector, &mode_config->connector_list, head) { 714 intel_connector = to_intel_connector(connector); 715 intel_encoder = intel_connector->encoder; 716 if (intel_encoder->hpd_pin > HPD_NONE && 717 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 718 connector->polled == DRM_CONNECTOR_POLL_HPD) { 719 DRM_INFO("HPD interrupt storm detected on connector %s: " 720 "switching from hotplug detection to polling\n", 721 drm_get_connector_name(connector)); 722 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 723 connector->polled = DRM_CONNECTOR_POLL_CONNECT 724 | DRM_CONNECTOR_POLL_DISCONNECT; 725 hpd_disabled = true; 726 } 727 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 728 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 729 drm_get_connector_name(connector), intel_encoder->hpd_pin); 730 } 731 } 732 /* if there were no outputs to poll, poll was disabled, 733 * therefore make sure it's enabled when disabling HPD on 734 * some connectors */ 735 if (hpd_disabled) { 736 drm_kms_helper_poll_enable(dev); 737 mod_timer(&dev_priv->hotplug_reenable_timer, 738 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 739 } 740 741 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 742 743 list_for_each_entry(connector, &mode_config->connector_list, head) { 744 intel_connector = to_intel_connector(connector); 745 intel_encoder = intel_connector->encoder; 746 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 747 if (intel_encoder->hot_plug) 748 intel_encoder->hot_plug(intel_encoder); 749 if (intel_hpd_irq_event(dev, connector)) 750 changed = true; 751 } 752 } 753 mutex_unlock(&mode_config->mutex); 754 755 if (changed) 756 drm_kms_helper_hotplug_event(dev); 757 } 758 759 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 760 { 761 drm_i915_private_t *dev_priv = dev->dev_private; 762 u32 busy_up, busy_down, max_avg, min_avg; 763 u8 new_delay; 764 765 spin_lock(&mchdev_lock); 766 767 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 768 769 new_delay = dev_priv->ips.cur_delay; 770 771 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 772 busy_up = I915_READ(RCPREVBSYTUPAVG); 773 busy_down = I915_READ(RCPREVBSYTDNAVG); 774 max_avg = I915_READ(RCBMAXAVG); 775 min_avg = I915_READ(RCBMINAVG); 776 777 /* Handle RCS change request from hw */ 778 if (busy_up > max_avg) { 779 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 780 new_delay = dev_priv->ips.cur_delay - 1; 781 if (new_delay < dev_priv->ips.max_delay) 782 new_delay = dev_priv->ips.max_delay; 783 } else if (busy_down < min_avg) { 784 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 785 new_delay = dev_priv->ips.cur_delay + 1; 786 if (new_delay > dev_priv->ips.min_delay) 787 new_delay = dev_priv->ips.min_delay; 788 } 789 790 if (ironlake_set_drps(dev, new_delay)) 791 dev_priv->ips.cur_delay = new_delay; 792 793 spin_unlock(&mchdev_lock); 794 795 return; 796 } 797 798 static void notify_ring(struct drm_device *dev, 799 struct intel_ring_buffer *ring) 800 { 801 if (ring->obj == NULL) 802 return; 803 804 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 805 806 wake_up_all(&ring->irq_queue); 807 
i915_queue_hangcheck(dev); 808 } 809 810 static void gen6_pm_rps_work(struct work_struct *work) 811 { 812 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 813 rps.work); 814 u32 pm_iir; 815 u8 new_delay; 816 817 spin_lock_irq(&dev_priv->irq_lock); 818 pm_iir = dev_priv->rps.pm_iir; 819 dev_priv->rps.pm_iir = 0; 820 /* Make sure not to corrupt PMIMR state used by ringbuffer code */ 821 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 822 spin_unlock_irq(&dev_priv->irq_lock); 823 824 /* Make sure we didn't queue anything we're not going to process. */ 825 WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS); 826 827 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) 828 return; 829 830 mutex_lock(&dev_priv->rps.hw_lock); 831 832 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 833 new_delay = dev_priv->rps.cur_delay + 1; 834 835 /* 836 * For better performance, jump directly 837 * to RPe if we're below it. 838 */ 839 if (IS_VALLEYVIEW(dev_priv->dev) && 840 dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay) 841 new_delay = dev_priv->rps.rpe_delay; 842 } else 843 new_delay = dev_priv->rps.cur_delay - 1; 844 845 /* sysfs frequency interfaces may have snuck in while servicing the 846 * interrupt 847 */ 848 if (new_delay >= dev_priv->rps.min_delay && 849 new_delay <= dev_priv->rps.max_delay) { 850 if (IS_VALLEYVIEW(dev_priv->dev)) 851 valleyview_set_rps(dev_priv->dev, new_delay); 852 else 853 gen6_set_rps(dev_priv->dev, new_delay); 854 } 855 856 if (IS_VALLEYVIEW(dev_priv->dev)) { 857 /* 858 * On VLV, when we enter RC6 we may not be at the minimum 859 * voltage level, so arm a timer to check. It should only 860 * fire when there's activity or once after we've entered 861 * RC6, and then won't be re-armed until the next RPS interrupt. 862 */ 863 mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work, 864 msecs_to_jiffies(100)); 865 } 866 867 mutex_unlock(&dev_priv->rps.hw_lock); 868 } 869 870 871 /** 872 * ivybridge_parity_work - Workqueue called when a parity error interrupt 873 * occurred. 874 * @work: workqueue struct 875 * 876 * Doesn't actually do anything except notify userspace. As a consequence of 877 * this event, userspace should try to remap the bad rows since statistically 878 * it is likely the same row is more likely to go bad again. 879 */ 880 static void ivybridge_parity_work(struct work_struct *work) 881 { 882 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 883 l3_parity.error_work); 884 u32 error_status, row, bank, subbank; 885 char *parity_event[5]; 886 uint32_t misccpctl; 887 unsigned long flags; 888 889 /* We must turn off DOP level clock gating to access the L3 registers. 890 * In order to prevent a get/put style interface, acquire struct mutex 891 * any time we access those registers. 
892 */ 893 mutex_lock(&dev_priv->dev->struct_mutex); 894 895 misccpctl = I915_READ(GEN7_MISCCPCTL); 896 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 897 POSTING_READ(GEN7_MISCCPCTL); 898 899 error_status = I915_READ(GEN7_L3CDERRST1); 900 row = GEN7_PARITY_ERROR_ROW(error_status); 901 bank = GEN7_PARITY_ERROR_BANK(error_status); 902 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 903 904 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 905 GEN7_L3CDERRST1_ENABLE); 906 POSTING_READ(GEN7_L3CDERRST1); 907 908 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 909 910 spin_lock_irqsave(&dev_priv->irq_lock, flags); 911 ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 912 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 913 914 mutex_unlock(&dev_priv->dev->struct_mutex); 915 916 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 917 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 918 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 919 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 920 parity_event[4] = NULL; 921 922 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 923 KOBJ_CHANGE, parity_event); 924 925 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 926 row, bank, subbank); 927 928 kfree(parity_event[3]); 929 kfree(parity_event[2]); 930 kfree(parity_event[1]); 931 } 932 933 static void ivybridge_parity_error_irq_handler(struct drm_device *dev) 934 { 935 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 936 937 if (!HAS_L3_GPU_CACHE(dev)) 938 return; 939 940 spin_lock(&dev_priv->irq_lock); 941 ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 942 spin_unlock(&dev_priv->irq_lock); 943 944 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 945 } 946 947 static void ilk_gt_irq_handler(struct drm_device *dev, 948 struct drm_i915_private *dev_priv, 949 u32 gt_iir) 950 { 951 if (gt_iir & 952 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 953 notify_ring(dev, &dev_priv->ring[RCS]); 954 if (gt_iir & ILK_BSD_USER_INTERRUPT) 955 notify_ring(dev, &dev_priv->ring[VCS]); 956 } 957 958 static void snb_gt_irq_handler(struct drm_device *dev, 959 struct drm_i915_private *dev_priv, 960 u32 gt_iir) 961 { 962 963 if (gt_iir & 964 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 965 notify_ring(dev, &dev_priv->ring[RCS]); 966 if (gt_iir & GT_BSD_USER_INTERRUPT) 967 notify_ring(dev, &dev_priv->ring[VCS]); 968 if (gt_iir & GT_BLT_USER_INTERRUPT) 969 notify_ring(dev, &dev_priv->ring[BCS]); 970 971 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 972 GT_BSD_CS_ERROR_INTERRUPT | 973 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 974 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 975 i915_handle_error(dev, false); 976 } 977 978 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 979 ivybridge_parity_error_irq_handler(dev); 980 } 981 982 #define HPD_STORM_DETECT_PERIOD 1000 983 #define HPD_STORM_THRESHOLD 5 984 985 static inline void intel_hpd_irq_handler(struct drm_device *dev, 986 u32 hotplug_trigger, 987 const u32 *hpd) 988 { 989 drm_i915_private_t *dev_priv = dev->dev_private; 990 int i; 991 bool storm_detected = false; 992 993 if (!hotplug_trigger) 994 return; 995 996 spin_lock(&dev_priv->irq_lock); 997 for (i = 1; i < HPD_NUM_PINS; i++) { 998 999 WARN(((hpd[i] & hotplug_trigger) && 1000 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), 1001 "Received HPD interrupt although disabled\n"); 1002 1003 if (!(hpd[i] & hotplug_trigger) || 1004 
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1005 continue; 1006 1007 dev_priv->hpd_event_bits |= (1 << i); 1008 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1009 dev_priv->hpd_stats[i].hpd_last_jiffies 1010 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1011 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1012 dev_priv->hpd_stats[i].hpd_cnt = 0; 1013 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); 1014 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1015 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1016 dev_priv->hpd_event_bits &= ~(1 << i); 1017 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 1018 storm_detected = true; 1019 } else { 1020 dev_priv->hpd_stats[i].hpd_cnt++; 1021 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, 1022 dev_priv->hpd_stats[i].hpd_cnt); 1023 } 1024 } 1025 1026 if (storm_detected) 1027 dev_priv->display.hpd_irq_setup(dev); 1028 spin_unlock(&dev_priv->irq_lock); 1029 1030 queue_work(dev_priv->wq, 1031 &dev_priv->hotplug_work); 1032 } 1033 1034 static void gmbus_irq_handler(struct drm_device *dev) 1035 { 1036 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1037 1038 wake_up_all(&dev_priv->gmbus_wait_queue); 1039 } 1040 1041 static void dp_aux_irq_handler(struct drm_device *dev) 1042 { 1043 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1044 1045 wake_up_all(&dev_priv->gmbus_wait_queue); 1046 } 1047 1048 /* The RPS events need forcewake, so we add them to a work queue and mask their 1049 * IMR bits until the work is done. Other interrupts can be processed without 1050 * the work queue. */ 1051 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1052 { 1053 if (pm_iir & GEN6_PM_RPS_EVENTS) { 1054 spin_lock(&dev_priv->irq_lock); 1055 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 1056 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); 1057 spin_unlock(&dev_priv->irq_lock); 1058 1059 queue_work(dev_priv->wq, &dev_priv->rps.work); 1060 } 1061 1062 if (HAS_VEBOX(dev_priv->dev)) { 1063 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1064 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1065 1066 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 1067 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 1068 i915_handle_error(dev_priv->dev, false); 1069 } 1070 } 1071 } 1072 1073 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1074 { 1075 struct drm_device *dev = (struct drm_device *) arg; 1076 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1077 u32 iir, gt_iir, pm_iir; 1078 irqreturn_t ret = IRQ_NONE; 1079 unsigned long irqflags; 1080 int pipe; 1081 u32 pipe_stats[I915_MAX_PIPES]; 1082 1083 atomic_inc(&dev_priv->irq_received); 1084 1085 while (true) { 1086 iir = I915_READ(VLV_IIR); 1087 gt_iir = I915_READ(GTIIR); 1088 pm_iir = I915_READ(GEN6_PMIIR); 1089 1090 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1091 goto out; 1092 1093 ret = IRQ_HANDLED; 1094 1095 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1096 1097 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1098 for_each_pipe(pipe) { 1099 int reg = PIPESTAT(pipe); 1100 pipe_stats[pipe] = I915_READ(reg); 1101 1102 /* 1103 * Clear the PIPE*STAT regs before the IIR 1104 */ 1105 if (pipe_stats[pipe] & 0x8000ffff) { 1106 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1107 DRM_DEBUG_DRIVER("pipe %c underrun\n", 1108 pipe_name(pipe)); 1109 I915_WRITE(reg, pipe_stats[pipe]); 1110 } 1111 } 1112 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1113 1114 for_each_pipe(pipe) { 1115 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1116 drm_handle_vblank(dev, pipe); 1117 1118 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 1119 intel_prepare_page_flip(dev, pipe); 1120 intel_finish_page_flip(dev, pipe); 1121 } 1122 } 1123 1124 /* Consume port. Then clear IIR or we'll miss events */ 1125 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 1126 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1127 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1128 1129 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1130 hotplug_status); 1131 1132 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 1133 1134 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1135 I915_READ(PORT_HOTPLUG_STAT); 1136 } 1137 1138 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1139 gmbus_irq_handler(dev); 1140 1141 if (pm_iir) 1142 gen6_rps_irq_handler(dev_priv, pm_iir); 1143 1144 I915_WRITE(GTIIR, gt_iir); 1145 I915_WRITE(GEN6_PMIIR, pm_iir); 1146 I915_WRITE(VLV_IIR, iir); 1147 } 1148 1149 out: 1150 return ret; 1151 } 1152 1153 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1154 { 1155 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1156 int pipe; 1157 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1158 1159 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1160 1161 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1162 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1163 SDE_AUDIO_POWER_SHIFT); 1164 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1165 port_name(port)); 1166 } 1167 1168 if (pch_iir & SDE_AUX_MASK) 1169 dp_aux_irq_handler(dev); 1170 1171 if (pch_iir & SDE_GMBUS) 1172 gmbus_irq_handler(dev); 1173 1174 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1175 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1176 1177 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1178 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1179 1180 if (pch_iir & SDE_POISON) 1181 DRM_ERROR("PCH poison interrupt\n"); 1182 1183 if (pch_iir & SDE_FDI_MASK) 1184 for_each_pipe(pipe) 1185 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1186 pipe_name(pipe), 1187 I915_READ(FDI_RX_IIR(pipe))); 1188 1189 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1190 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1191 1192 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1193 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1194 1195 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1196 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1197 false)) 1198 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1199 1200 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1201 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1202 false)) 1203 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1204 } 1205 1206 static void ivb_err_int_handler(struct drm_device *dev) 1207 { 1208 struct drm_i915_private *dev_priv = dev->dev_private; 1209 u32 err_int = I915_READ(GEN7_ERR_INT); 1210 1211 if (err_int & ERR_INT_POISON) 1212 DRM_ERROR("Poison interrupt\n"); 1213 1214 if (err_int & ERR_INT_FIFO_UNDERRUN_A) 1215 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1216 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1217 1218 if (err_int & ERR_INT_FIFO_UNDERRUN_B) 1219 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1220 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1221 1222 if (err_int & ERR_INT_FIFO_UNDERRUN_C) 1223 if 
(intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) 1224 DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); 1225 1226 I915_WRITE(GEN7_ERR_INT, err_int); 1227 } 1228 1229 static void cpt_serr_int_handler(struct drm_device *dev) 1230 { 1231 struct drm_i915_private *dev_priv = dev->dev_private; 1232 u32 serr_int = I915_READ(SERR_INT); 1233 1234 if (serr_int & SERR_INT_POISON) 1235 DRM_ERROR("PCH poison interrupt\n"); 1236 1237 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1238 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1239 false)) 1240 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1241 1242 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1243 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1244 false)) 1245 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1246 1247 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1248 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 1249 false)) 1250 DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 1251 1252 I915_WRITE(SERR_INT, serr_int); 1253 } 1254 1255 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1256 { 1257 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1258 int pipe; 1259 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1260 1261 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 1262 1263 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1264 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1265 SDE_AUDIO_POWER_SHIFT_CPT); 1266 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1267 port_name(port)); 1268 } 1269 1270 if (pch_iir & SDE_AUX_MASK_CPT) 1271 dp_aux_irq_handler(dev); 1272 1273 if (pch_iir & SDE_GMBUS_CPT) 1274 gmbus_irq_handler(dev); 1275 1276 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1277 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1278 1279 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1280 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 1281 1282 if (pch_iir & SDE_FDI_MASK_CPT) 1283 for_each_pipe(pipe) 1284 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1285 pipe_name(pipe), 1286 I915_READ(FDI_RX_IIR(pipe))); 1287 1288 if (pch_iir & SDE_ERROR_CPT) 1289 cpt_serr_int_handler(dev); 1290 } 1291 1292 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1293 { 1294 struct drm_i915_private *dev_priv = dev->dev_private; 1295 1296 if (de_iir & DE_AUX_CHANNEL_A) 1297 dp_aux_irq_handler(dev); 1298 1299 if (de_iir & DE_GSE) 1300 intel_opregion_asle_intr(dev); 1301 1302 if (de_iir & DE_PIPEA_VBLANK) 1303 drm_handle_vblank(dev, 0); 1304 1305 if (de_iir & DE_PIPEB_VBLANK) 1306 drm_handle_vblank(dev, 1); 1307 1308 if (de_iir & DE_POISON) 1309 DRM_ERROR("Poison interrupt\n"); 1310 1311 if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1312 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1313 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1314 1315 if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 1316 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1317 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1318 1319 if (de_iir & DE_PLANEA_FLIP_DONE) { 1320 intel_prepare_page_flip(dev, 0); 1321 intel_finish_page_flip_plane(dev, 0); 1322 } 1323 1324 if (de_iir & DE_PLANEB_FLIP_DONE) { 1325 intel_prepare_page_flip(dev, 1); 1326 intel_finish_page_flip_plane(dev, 1); 1327 } 1328 1329 /* check event from PCH */ 1330 if (de_iir & DE_PCH_EVENT) { 1331 u32 pch_iir = I915_READ(SDEIIR); 1332 1333 if (HAS_PCH_CPT(dev)) 1334 cpt_irq_handler(dev, pch_iir); 1335 else 1336 ibx_irq_handler(dev, pch_iir); 1337 1338 /* should clear PCH hotplug event before 
clear CPU irq */ 1339 I915_WRITE(SDEIIR, pch_iir); 1340 } 1341 1342 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1343 ironlake_rps_change_irq_handler(dev); 1344 } 1345 1346 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 1347 { 1348 struct drm_i915_private *dev_priv = dev->dev_private; 1349 int i; 1350 1351 if (de_iir & DE_ERR_INT_IVB) 1352 ivb_err_int_handler(dev); 1353 1354 if (de_iir & DE_AUX_CHANNEL_A_IVB) 1355 dp_aux_irq_handler(dev); 1356 1357 if (de_iir & DE_GSE_IVB) 1358 intel_opregion_asle_intr(dev); 1359 1360 for (i = 0; i < 3; i++) { 1361 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 1362 drm_handle_vblank(dev, i); 1363 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 1364 intel_prepare_page_flip(dev, i); 1365 intel_finish_page_flip_plane(dev, i); 1366 } 1367 } 1368 1369 /* check event from PCH */ 1370 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 1371 u32 pch_iir = I915_READ(SDEIIR); 1372 1373 cpt_irq_handler(dev, pch_iir); 1374 1375 /* clear PCH hotplug event before clear CPU irq */ 1376 I915_WRITE(SDEIIR, pch_iir); 1377 } 1378 } 1379 1380 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1381 { 1382 struct drm_device *dev = (struct drm_device *) arg; 1383 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1384 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 1385 irqreturn_t ret = IRQ_NONE; 1386 bool err_int_reenable = false; 1387 1388 atomic_inc(&dev_priv->irq_received); 1389 1390 /* We get interrupts on unclaimed registers, so check for this before we 1391 * do any I915_{READ,WRITE}. */ 1392 intel_uncore_check_errors(dev); 1393 1394 /* disable master interrupt before clearing iir */ 1395 de_ier = I915_READ(DEIER); 1396 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 1397 POSTING_READ(DEIER); 1398 1399 /* Disable south interrupts. We'll only write to SDEIIR once, so further 1400 * interrupts will will be stored on its back queue, and then we'll be 1401 * able to process them after we restore SDEIER (as soon as we restore 1402 * it, we'll get an interrupt if SDEIIR still has something to process 1403 * due to its back queue). */ 1404 if (!HAS_PCH_NOP(dev)) { 1405 sde_ier = I915_READ(SDEIER); 1406 I915_WRITE(SDEIER, 0); 1407 POSTING_READ(SDEIER); 1408 } 1409 1410 /* On Haswell, also mask ERR_INT because we don't want to risk 1411 * generating "unclaimed register" interrupts from inside the interrupt 1412 * handler. 
*/ 1413 if (IS_HASWELL(dev)) { 1414 spin_lock(&dev_priv->irq_lock); 1415 err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB; 1416 if (err_int_reenable) 1417 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 1418 spin_unlock(&dev_priv->irq_lock); 1419 } 1420 1421 gt_iir = I915_READ(GTIIR); 1422 if (gt_iir) { 1423 if (INTEL_INFO(dev)->gen >= 6) 1424 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1425 else 1426 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 1427 I915_WRITE(GTIIR, gt_iir); 1428 ret = IRQ_HANDLED; 1429 } 1430 1431 de_iir = I915_READ(DEIIR); 1432 if (de_iir) { 1433 if (INTEL_INFO(dev)->gen >= 7) 1434 ivb_display_irq_handler(dev, de_iir); 1435 else 1436 ilk_display_irq_handler(dev, de_iir); 1437 I915_WRITE(DEIIR, de_iir); 1438 ret = IRQ_HANDLED; 1439 } 1440 1441 if (INTEL_INFO(dev)->gen >= 6) { 1442 u32 pm_iir = I915_READ(GEN6_PMIIR); 1443 if (pm_iir) { 1444 gen6_rps_irq_handler(dev_priv, pm_iir); 1445 I915_WRITE(GEN6_PMIIR, pm_iir); 1446 ret = IRQ_HANDLED; 1447 } 1448 } 1449 1450 if (err_int_reenable) { 1451 spin_lock(&dev_priv->irq_lock); 1452 if (ivb_can_enable_err_int(dev)) 1453 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 1454 spin_unlock(&dev_priv->irq_lock); 1455 } 1456 1457 I915_WRITE(DEIER, de_ier); 1458 POSTING_READ(DEIER); 1459 if (!HAS_PCH_NOP(dev)) { 1460 I915_WRITE(SDEIER, sde_ier); 1461 POSTING_READ(SDEIER); 1462 } 1463 1464 return ret; 1465 } 1466 1467 /** 1468 * i915_error_work_func - do process context error handling work 1469 * @work: work struct 1470 * 1471 * Fire an error uevent so userspace can see that a hang or error 1472 * was detected. 1473 */ 1474 static void i915_error_work_func(struct work_struct *work) 1475 { 1476 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 1477 work); 1478 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 1479 gpu_error); 1480 struct drm_device *dev = dev_priv->dev; 1481 struct intel_ring_buffer *ring; 1482 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1483 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1484 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1485 int i, ret; 1486 1487 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1488 1489 /* 1490 * Note that there's only one work item which does gpu resets, so we 1491 * need not worry about concurrent gpu resets potentially incrementing 1492 * error->reset_counter twice. We only need to take care of another 1493 * racing irq/hangcheck declaring the gpu dead for a second time. A 1494 * quick check for that is good enough: schedule_work ensures the 1495 * correct ordering between hang detection and this work item, and since 1496 * the reset in-progress bit is only ever set by code outside of this 1497 * work we don't need to worry about any other races. 1498 */ 1499 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 1500 DRM_DEBUG_DRIVER("resetting chip\n"); 1501 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 1502 reset_event); 1503 1504 ret = i915_reset(dev); 1505 1506 if (ret == 0) { 1507 /* 1508 * After all the gem state is reset, increment the reset 1509 * counter and wake up everyone waiting for the reset to 1510 * complete. 1511 * 1512 * Since unlock operations are a one-sided barrier only, 1513 * we need to insert a barrier here to order any seqno 1514 * updates before 1515 * the counter increment. 
1516 */ 1517 smp_mb__before_atomic_inc(); 1518 atomic_inc(&dev_priv->gpu_error.reset_counter); 1519 1520 kobject_uevent_env(&dev->primary->kdev.kobj, 1521 KOBJ_CHANGE, reset_done_event); 1522 } else { 1523 atomic_set(&error->reset_counter, I915_WEDGED); 1524 } 1525 1526 for_each_ring(ring, dev_priv, i) 1527 wake_up_all(&ring->irq_queue); 1528 1529 intel_display_handle_reset(dev); 1530 1531 wake_up_all(&dev_priv->gpu_error.reset_queue); 1532 } 1533 } 1534 1535 static void i915_report_and_clear_eir(struct drm_device *dev) 1536 { 1537 struct drm_i915_private *dev_priv = dev->dev_private; 1538 uint32_t instdone[I915_NUM_INSTDONE_REG]; 1539 u32 eir = I915_READ(EIR); 1540 int pipe, i; 1541 1542 if (!eir) 1543 return; 1544 1545 pr_err("render error detected, EIR: 0x%08x\n", eir); 1546 1547 i915_get_extra_instdone(dev, instdone); 1548 1549 if (IS_G4X(dev)) { 1550 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1551 u32 ipeir = I915_READ(IPEIR_I965); 1552 1553 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1554 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1555 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1556 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1557 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1558 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1559 I915_WRITE(IPEIR_I965, ipeir); 1560 POSTING_READ(IPEIR_I965); 1561 } 1562 if (eir & GM45_ERROR_PAGE_TABLE) { 1563 u32 pgtbl_err = I915_READ(PGTBL_ER); 1564 pr_err("page table error\n"); 1565 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1566 I915_WRITE(PGTBL_ER, pgtbl_err); 1567 POSTING_READ(PGTBL_ER); 1568 } 1569 } 1570 1571 if (!IS_GEN2(dev)) { 1572 if (eir & I915_ERROR_PAGE_TABLE) { 1573 u32 pgtbl_err = I915_READ(PGTBL_ER); 1574 pr_err("page table error\n"); 1575 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1576 I915_WRITE(PGTBL_ER, pgtbl_err); 1577 POSTING_READ(PGTBL_ER); 1578 } 1579 } 1580 1581 if (eir & I915_ERROR_MEMORY_REFRESH) { 1582 pr_err("memory refresh error:\n"); 1583 for_each_pipe(pipe) 1584 pr_err("pipe %c stat: 0x%08x\n", 1585 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 1586 /* pipestat has already been acked */ 1587 } 1588 if (eir & I915_ERROR_INSTRUCTION) { 1589 pr_err("instruction error\n"); 1590 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1591 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1592 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1593 if (INTEL_INFO(dev)->gen < 4) { 1594 u32 ipeir = I915_READ(IPEIR); 1595 1596 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1597 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1598 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 1599 I915_WRITE(IPEIR, ipeir); 1600 POSTING_READ(IPEIR); 1601 } else { 1602 u32 ipeir = I915_READ(IPEIR_I965); 1603 1604 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1605 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1606 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1607 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1608 I915_WRITE(IPEIR_I965, ipeir); 1609 POSTING_READ(IPEIR_I965); 1610 } 1611 } 1612 1613 I915_WRITE(EIR, eir); 1614 POSTING_READ(EIR); 1615 eir = I915_READ(EIR); 1616 if (eir) { 1617 /* 1618 * some errors might have become stuck, 1619 * mask them. 
1620 */ 1621 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 1622 I915_WRITE(EMR, I915_READ(EMR) | eir); 1623 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 1624 } 1625 } 1626 1627 /** 1628 * i915_handle_error - handle an error interrupt 1629 * @dev: drm device 1630 * 1631 * Do some basic checking of regsiter state at error interrupt time and 1632 * dump it to the syslog. Also call i915_capture_error_state() to make 1633 * sure we get a record and make it available in debugfs. Fire a uevent 1634 * so userspace knows something bad happened (should trigger collection 1635 * of a ring dump etc.). 1636 */ 1637 void i915_handle_error(struct drm_device *dev, bool wedged) 1638 { 1639 struct drm_i915_private *dev_priv = dev->dev_private; 1640 struct intel_ring_buffer *ring; 1641 int i; 1642 1643 i915_capture_error_state(dev); 1644 i915_report_and_clear_eir(dev); 1645 1646 if (wedged) { 1647 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 1648 &dev_priv->gpu_error.reset_counter); 1649 1650 /* 1651 * Wakeup waiting processes so that the reset work item 1652 * doesn't deadlock trying to grab various locks. 1653 */ 1654 for_each_ring(ring, dev_priv, i) 1655 wake_up_all(&ring->irq_queue); 1656 } 1657 1658 queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 1659 } 1660 1661 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1662 { 1663 drm_i915_private_t *dev_priv = dev->dev_private; 1664 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1665 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1666 struct drm_i915_gem_object *obj; 1667 struct intel_unpin_work *work; 1668 unsigned long flags; 1669 bool stall_detected; 1670 1671 /* Ignore early vblank irqs */ 1672 if (intel_crtc == NULL) 1673 return; 1674 1675 spin_lock_irqsave(&dev->event_lock, flags); 1676 work = intel_crtc->unpin_work; 1677 1678 if (work == NULL || 1679 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1680 !work->enable_stall_check) { 1681 /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ 1682 spin_unlock_irqrestore(&dev->event_lock, flags); 1683 return; 1684 } 1685 1686 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 1687 obj = work->pending_flip_obj; 1688 if (INTEL_INFO(dev)->gen >= 4) { 1689 int dspsurf = DSPSURF(intel_crtc->plane); 1690 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1691 i915_gem_obj_ggtt_offset(obj); 1692 } else { 1693 int dspaddr = DSPADDR(intel_crtc->plane); 1694 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 1695 crtc->y * crtc->fb->pitches[0] + 1696 crtc->x * crtc->fb->bits_per_pixel/8); 1697 } 1698 1699 spin_unlock_irqrestore(&dev->event_lock, flags); 1700 1701 if (stall_detected) { 1702 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 1703 intel_prepare_page_flip(dev, intel_crtc->plane); 1704 } 1705 } 1706 1707 /* Called from drm generic code, passed 'crtc' which 1708 * we use as a pipe index 1709 */ 1710 static int i915_enable_vblank(struct drm_device *dev, int pipe) 1711 { 1712 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1713 unsigned long irqflags; 1714 1715 if (!i915_pipe_enabled(dev, pipe)) 1716 return -EINVAL; 1717 1718 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1719 if (INTEL_INFO(dev)->gen >= 4) 1720 i915_enable_pipestat(dev_priv, pipe, 1721 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1722 else 1723 i915_enable_pipestat(dev_priv, pipe, 1724 PIPE_VBLANK_INTERRUPT_ENABLE); 1725 1726 /* maintain vblank delivery even in deep C-states */ 1727 if (dev_priv->info->gen == 3) 1728 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 1729 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1730 1731 return 0; 1732 } 1733 1734 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1735 { 1736 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1737 unsigned long irqflags; 1738 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1739 DE_PIPE_VBLANK_ILK(pipe); 1740 1741 if (!i915_pipe_enabled(dev, pipe)) 1742 return -EINVAL; 1743 1744 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1745 ironlake_enable_display_irq(dev_priv, bit); 1746 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1747 1748 return 0; 1749 } 1750 1751 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 1752 { 1753 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1754 unsigned long irqflags; 1755 u32 imr; 1756 1757 if (!i915_pipe_enabled(dev, pipe)) 1758 return -EINVAL; 1759 1760 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1761 imr = I915_READ(VLV_IMR); 1762 if (pipe == 0) 1763 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1764 else 1765 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1766 I915_WRITE(VLV_IMR, imr); 1767 i915_enable_pipestat(dev_priv, pipe, 1768 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1769 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1770 1771 return 0; 1772 } 1773 1774 /* Called from drm generic code, passed 'crtc' which 1775 * we use as a pipe index 1776 */ 1777 static void i915_disable_vblank(struct drm_device *dev, int pipe) 1778 { 1779 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1780 unsigned long irqflags; 1781 1782 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1783 if (dev_priv->info->gen == 3) 1784 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 1785 1786 i915_disable_pipestat(dev_priv, pipe, 1787 PIPE_VBLANK_INTERRUPT_ENABLE | 1788 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1789 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1790 } 1791 1792 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1793 { 1794 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1795 unsigned long irqflags; 1796 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1797 DE_PIPE_VBLANK_ILK(pipe); 1798 1799 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1800 ironlake_disable_display_irq(dev_priv, bit); 1801 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1802 } 1803 1804 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 1805 { 1806 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1807 unsigned long irqflags; 1808 u32 imr; 1809 1810 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1811 i915_disable_pipestat(dev_priv, pipe, 1812 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1813 imr = I915_READ(VLV_IMR); 1814 if (pipe == 0) 1815 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1816 else 1817 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1818 I915_WRITE(VLV_IMR, imr); 1819 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1820 } 1821 1822 static u32 1823 ring_last_seqno(struct intel_ring_buffer *ring) 1824 { 1825 return list_entry(ring->request_list.prev, 1826 struct drm_i915_gem_request, list)->seqno; 1827 } 1828 1829 static bool 1830 ring_idle(struct intel_ring_buffer *ring, u32 seqno) 1831 { 1832 return (list_empty(&ring->request_list) || 1833 i915_seqno_passed(seqno, ring_last_seqno(ring))); 1834 } 1835 1836 static struct intel_ring_buffer * 1837 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 1838 { 1839 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1840 u32 cmd, ipehr, acthd, acthd_min; 1841 1842 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 1843 if ((ipehr & ~(0x3 << 16)) != 1844 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 1845 return NULL; 1846 1847 /* ACTHD is likely pointing to the dword after the actual command, 1848 * so scan backwards until we find the MBOX. 1849 */ 1850 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 1851 acthd_min = max((int)acthd - 3 * 4, 0); 1852 do { 1853 cmd = ioread32(ring->virtual_start + acthd); 1854 if (cmd == ipehr) 1855 break; 1856 1857 acthd -= 4; 1858 if (acthd < acthd_min) 1859 return NULL; 1860 } while (1); 1861 1862 *seqno = ioread32(ring->virtual_start+acthd+4)+1; 1863 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 1864 } 1865 1866 static int semaphore_passed(struct intel_ring_buffer *ring) 1867 { 1868 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1869 struct intel_ring_buffer *signaller; 1870 u32 seqno, ctl; 1871 1872 ring->hangcheck.deadlock = true; 1873 1874 signaller = semaphore_waits_for(ring, &seqno); 1875 if (signaller == NULL || signaller->hangcheck.deadlock) 1876 return -1; 1877 1878 /* cursory check for an unkickable deadlock */ 1879 ctl = I915_READ_CTL(signaller); 1880 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 1881 return -1; 1882 1883 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 1884 } 1885 1886 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 1887 { 1888 struct intel_ring_buffer *ring; 1889 int i; 1890 1891 for_each_ring(ring, dev_priv, i) 1892 ring->hangcheck.deadlock = false; 1893 } 1894 1895 static enum intel_ring_hangcheck_action 1896 ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 1897 { 1898 struct drm_device *dev = ring->dev; 1899 struct drm_i915_private *dev_priv = dev->dev_private; 1900 u32 tmp; 1901 1902 if (ring->hangcheck.acthd != acthd) 1903 return HANGCHECK_ACTIVE; 1904 1905 if (IS_GEN2(dev)) 1906 return HANGCHECK_HUNG; 1907 1908 /* Is the chip hanging on a WAIT_FOR_EVENT? 
1909 * If so we can simply poke the RB_WAIT bit 1910 * and break the hang. This should work on 1911 * all but the second generation chipsets. 1912 */ 1913 tmp = I915_READ_CTL(ring); 1914 if (tmp & RING_WAIT) { 1915 DRM_ERROR("Kicking stuck wait on %s\n", 1916 ring->name); 1917 I915_WRITE_CTL(ring, tmp); 1918 return HANGCHECK_KICK; 1919 } 1920 1921 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 1922 switch (semaphore_passed(ring)) { 1923 default: 1924 return HANGCHECK_HUNG; 1925 case 1: 1926 DRM_ERROR("Kicking stuck semaphore on %s\n", 1927 ring->name); 1928 I915_WRITE_CTL(ring, tmp); 1929 return HANGCHECK_KICK; 1930 case 0: 1931 return HANGCHECK_WAIT; 1932 } 1933 } 1934 1935 return HANGCHECK_HUNG; 1936 } 1937 1938 /** 1939 * This is called when the chip hasn't reported back with completed 1940 * batchbuffers in a long time. We keep track of seqno progress per ring, and 1941 * if there is no progress, the hangcheck score for that ring is increased. 1942 * Further, acthd is inspected to see if the ring is stuck. If it is, 1943 * we kick the ring. If we see no progress on three subsequent calls 1944 * we assume the chip is wedged and try to fix it by resetting the chip. 1945 */ 1946 static void i915_hangcheck_elapsed(unsigned long data) 1947 { 1948 struct drm_device *dev = (struct drm_device *)data; 1949 drm_i915_private_t *dev_priv = dev->dev_private; 1950 struct intel_ring_buffer *ring; 1951 int i; 1952 int busy_count = 0, rings_hung = 0; 1953 bool stuck[I915_NUM_RINGS] = { 0 }; 1954 #define BUSY 1 1955 #define KICK 5 1956 #define HUNG 20 1957 #define FIRE 30 1958 1959 if (!i915_enable_hangcheck) 1960 return; 1961 1962 for_each_ring(ring, dev_priv, i) { 1963 u32 seqno, acthd; 1964 bool busy = true; 1965 1966 semaphore_clear_deadlocks(dev_priv); 1967 1968 seqno = ring->get_seqno(ring, false); 1969 acthd = intel_ring_get_active_head(ring); 1970 1971 if (ring->hangcheck.seqno == seqno) { 1972 if (ring_idle(ring, seqno)) { 1973 if (waitqueue_active(&ring->irq_queue)) { 1974 /* Issue a wake-up to catch stuck h/w. */ 1975 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 1976 ring->name); 1977 wake_up_all(&ring->irq_queue); 1978 ring->hangcheck.score += HUNG; 1979 } else 1980 busy = false; 1981 } else { 1982 /* We always increment the hangcheck score 1983 * if the ring is busy and still processing 1984 * the same request, so that no single request 1985 * can run indefinitely (such as a chain of 1986 * batches). The only time we do not increment 1987 * the hangcheck score on this ring is when this 1988 * ring is in a legitimate wait for another 1989 * ring. In that case the waiting ring is a 1990 * victim and we want to be sure we catch the 1991 * right culprit. Then every time we do kick 1992 * the ring, add a small increment to the 1993 * score so that we can catch a batch that is 1994 * being repeatedly kicked and so responsible 1995 * for stalling the machine. 1996 */ 1997 ring->hangcheck.action = ring_stuck(ring, 1998 acthd); 1999 2000 switch (ring->hangcheck.action) { 2001 case HANGCHECK_WAIT: 2002 break; 2003 case HANGCHECK_ACTIVE: 2004 ring->hangcheck.score += BUSY; 2005 break; 2006 case HANGCHECK_KICK: 2007 ring->hangcheck.score += KICK; 2008 break; 2009 case HANGCHECK_HUNG: 2010 ring->hangcheck.score += HUNG; 2011 stuck[i] = true; 2012 break; 2013 } 2014 } 2015 } else { 2016 /* Gradually reduce the count so that we catch DoS 2017 * attempts across multiple batches.
2018 */ 2019 if (ring->hangcheck.score > 0) 2020 ring->hangcheck.score--; 2021 } 2022 2023 ring->hangcheck.seqno = seqno; 2024 ring->hangcheck.acthd = acthd; 2025 busy_count += busy; 2026 } 2027 2028 for_each_ring(ring, dev_priv, i) { 2029 if (ring->hangcheck.score > FIRE) { 2030 DRM_ERROR("%s on %s\n", 2031 stuck[i] ? "stuck" : "no progress", 2032 ring->name); 2033 rings_hung++; 2034 } 2035 } 2036 2037 if (rings_hung) 2038 return i915_handle_error(dev, true); 2039 2040 if (busy_count) 2041 /* Reset the timer in case the chip hangs without another request 2042 * being added */ 2043 i915_queue_hangcheck(dev); 2044 } 2045 2046 void i915_queue_hangcheck(struct drm_device *dev) 2047 { 2048 struct drm_i915_private *dev_priv = dev->dev_private; 2049 if (!i915_enable_hangcheck) 2050 return; 2051 2052 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2053 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2054 } 2055 2056 static void ibx_irq_preinstall(struct drm_device *dev) 2057 { 2058 struct drm_i915_private *dev_priv = dev->dev_private; 2059 2060 if (HAS_PCH_NOP(dev)) 2061 return; 2062 2063 /* south display irq */ 2064 I915_WRITE(SDEIMR, 0xffffffff); 2065 /* 2066 * SDEIER is also touched by the interrupt handler to work around missed 2067 * PCH interrupts. Hence we can't update it after the interrupt handler 2068 * is enabled - instead we unconditionally enable all PCH interrupt 2069 * sources here, but then only unmask them as needed with SDEIMR. 2070 */ 2071 I915_WRITE(SDEIER, 0xffffffff); 2072 POSTING_READ(SDEIER); 2073 } 2074 2075 static void gen5_gt_irq_preinstall(struct drm_device *dev) 2076 { 2077 struct drm_i915_private *dev_priv = dev->dev_private; 2078 2079 /* and GT */ 2080 I915_WRITE(GTIMR, 0xffffffff); 2081 I915_WRITE(GTIER, 0x0); 2082 POSTING_READ(GTIER); 2083 2084 if (INTEL_INFO(dev)->gen >= 6) { 2085 /* and PM */ 2086 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2087 I915_WRITE(GEN6_PMIER, 0x0); 2088 POSTING_READ(GEN6_PMIER); 2089 } 2090 } 2091 2092 /* drm_dma.h hooks 2093 */ 2094 static void ironlake_irq_preinstall(struct drm_device *dev) 2095 { 2096 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2097 2098 atomic_set(&dev_priv->irq_received, 0); 2099 2100 I915_WRITE(HWSTAM, 0xeffe); 2101 2102 I915_WRITE(DEIMR, 0xffffffff); 2103 I915_WRITE(DEIER, 0x0); 2104 POSTING_READ(DEIER); 2105 2106 gen5_gt_irq_preinstall(dev); 2107 2108 ibx_irq_preinstall(dev); 2109 } 2110 2111 static void valleyview_irq_preinstall(struct drm_device *dev) 2112 { 2113 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2114 int pipe; 2115 2116 atomic_set(&dev_priv->irq_received, 0); 2117 2118 /* VLV magic */ 2119 I915_WRITE(VLV_IMR, 0); 2120 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2121 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2122 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2123 2124 /* and GT */ 2125 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2126 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2127 2128 gen5_gt_irq_preinstall(dev); 2129 2130 I915_WRITE(DPINVGTT, 0xff); 2131 2132 I915_WRITE(PORT_HOTPLUG_EN, 0); 2133 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2134 for_each_pipe(pipe) 2135 I915_WRITE(PIPESTAT(pipe), 0xffff); 2136 I915_WRITE(VLV_IIR, 0xffffffff); 2137 I915_WRITE(VLV_IMR, 0xffffffff); 2138 I915_WRITE(VLV_IER, 0x0); 2139 POSTING_READ(VLV_IER); 2140 } 2141 2142 static void ibx_hpd_irq_setup(struct drm_device *dev) 2143 { 2144 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2145 struct drm_mode_config *mode_config = &dev->mode_config; 2146
struct intel_encoder *intel_encoder; 2147 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2148 2149 if (HAS_PCH_IBX(dev)) { 2150 hotplug_irqs = SDE_HOTPLUG_MASK; 2151 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2152 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2153 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 2154 } else { 2155 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 2156 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2157 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2158 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 2159 } 2160 2161 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2162 2163 /* 2164 * Enable digital hotplug on the PCH, and configure the DP short pulse 2165 * duration to 2ms (which is the minimum in the Display Port spec) 2166 * 2167 * This register is the same on all known PCH chips. 2168 */ 2169 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2170 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 2171 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2172 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2173 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2174 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2175 } 2176 2177 static void ibx_irq_postinstall(struct drm_device *dev) 2178 { 2179 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2180 u32 mask; 2181 2182 if (HAS_PCH_NOP(dev)) 2183 return; 2184 2185 if (HAS_PCH_IBX(dev)) { 2186 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2187 SDE_TRANSA_FIFO_UNDER | SDE_POISON; 2188 } else { 2189 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 2190 2191 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2192 } 2193 2194 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2195 I915_WRITE(SDEIMR, ~mask); 2196 } 2197 2198 static void gen5_gt_irq_postinstall(struct drm_device *dev) 2199 { 2200 struct drm_i915_private *dev_priv = dev->dev_private; 2201 u32 pm_irqs, gt_irqs; 2202 2203 pm_irqs = gt_irqs = 0; 2204 2205 dev_priv->gt_irq_mask = ~0; 2206 if (HAS_L3_GPU_CACHE(dev)) { 2207 /* L3 parity interrupt is always unmasked. 
*/ 2208 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2209 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2210 } 2211 2212 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2213 if (IS_GEN5(dev)) { 2214 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2215 ILK_BSD_USER_INTERRUPT; 2216 } else { 2217 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 2218 } 2219 2220 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2221 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2222 I915_WRITE(GTIER, gt_irqs); 2223 POSTING_READ(GTIER); 2224 2225 if (INTEL_INFO(dev)->gen >= 6) { 2226 pm_irqs |= GEN6_PM_RPS_EVENTS; 2227 2228 if (HAS_VEBOX(dev)) 2229 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2230 2231 dev_priv->pm_irq_mask = 0xffffffff; 2232 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2233 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 2234 I915_WRITE(GEN6_PMIER, pm_irqs); 2235 POSTING_READ(GEN6_PMIER); 2236 } 2237 } 2238 2239 static int ironlake_irq_postinstall(struct drm_device *dev) 2240 { 2241 unsigned long irqflags; 2242 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2243 u32 display_mask, extra_mask; 2244 2245 if (INTEL_INFO(dev)->gen >= 7) { 2246 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2247 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 2248 DE_PLANEB_FLIP_DONE_IVB | 2249 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 2250 DE_ERR_INT_IVB); 2251 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2252 DE_PIPEA_VBLANK_IVB); 2253 2254 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2255 } else { 2256 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2257 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2258 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 2259 DE_PIPEA_FIFO_UNDERRUN | DE_POISON); 2260 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 2261 } 2262 2263 dev_priv->irq_mask = ~display_mask; 2264 2265 /* should always be able to generate an irq */ 2266 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2267 I915_WRITE(DEIMR, dev_priv->irq_mask); 2268 I915_WRITE(DEIER, display_mask | extra_mask); 2269 POSTING_READ(DEIER); 2270 2271 gen5_gt_irq_postinstall(dev); 2272 2273 ibx_irq_postinstall(dev); 2274 2275 if (IS_IRONLAKE_M(dev)) { 2276 /* Enable PCU event interrupts 2277 * 2278 * spinlocking not required here for correctness since interrupt 2279 * setup is guaranteed to run in single-threaded context. But we 2280 * need it to make the assert_spin_locked check happy. */ 2281 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2282 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2283 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2284 } 2285 2286 return 0; 2287 } 2288 2289 static int valleyview_irq_postinstall(struct drm_device *dev) 2290 { 2291 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2292 u32 enable_mask; 2293 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2294 unsigned long irqflags; 2295 2296 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2297 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2298 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2299 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2300 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2301 2302 /* 2303 * Leave vblank interrupts masked initially; enable/disable will 2304 * toggle them based on usage.
2305 */ 2306 dev_priv->irq_mask = (~enable_mask) | 2307 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2308 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2309 2310 I915_WRITE(PORT_HOTPLUG_EN, 0); 2311 POSTING_READ(PORT_HOTPLUG_EN); 2312 2313 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2314 I915_WRITE(VLV_IER, enable_mask); 2315 I915_WRITE(VLV_IIR, 0xffffffff); 2316 I915_WRITE(PIPESTAT(0), 0xffff); 2317 I915_WRITE(PIPESTAT(1), 0xffff); 2318 POSTING_READ(VLV_IER); 2319 2320 /* Interrupt setup is already guaranteed to be single-threaded, this is 2321 * just to make the assert_spin_locked check happy. */ 2322 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2323 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2324 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2325 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2326 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2327 2328 I915_WRITE(VLV_IIR, 0xffffffff); 2329 I915_WRITE(VLV_IIR, 0xffffffff); 2330 2331 gen5_gt_irq_postinstall(dev); 2332 2333 /* ack & enable invalid PTE error interrupts */ 2334 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2335 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2336 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2337 #endif 2338 2339 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2340 2341 return 0; 2342 } 2343 2344 static void valleyview_irq_uninstall(struct drm_device *dev) 2345 { 2346 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2347 int pipe; 2348 2349 if (!dev_priv) 2350 return; 2351 2352 del_timer_sync(&dev_priv->hotplug_reenable_timer); 2353 2354 for_each_pipe(pipe) 2355 I915_WRITE(PIPESTAT(pipe), 0xffff); 2356 2357 I915_WRITE(HWSTAM, 0xffffffff); 2358 I915_WRITE(PORT_HOTPLUG_EN, 0); 2359 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2360 for_each_pipe(pipe) 2361 I915_WRITE(PIPESTAT(pipe), 0xffff); 2362 I915_WRITE(VLV_IIR, 0xffffffff); 2363 I915_WRITE(VLV_IMR, 0xffffffff); 2364 I915_WRITE(VLV_IER, 0x0); 2365 POSTING_READ(VLV_IER); 2366 } 2367 2368 static void ironlake_irq_uninstall(struct drm_device *dev) 2369 { 2370 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2371 2372 if (!dev_priv) 2373 return; 2374 2375 del_timer_sync(&dev_priv->hotplug_reenable_timer); 2376 2377 I915_WRITE(HWSTAM, 0xffffffff); 2378 2379 I915_WRITE(DEIMR, 0xffffffff); 2380 I915_WRITE(DEIER, 0x0); 2381 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2382 if (IS_GEN7(dev)) 2383 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2384 2385 I915_WRITE(GTIMR, 0xffffffff); 2386 I915_WRITE(GTIER, 0x0); 2387 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2388 2389 if (HAS_PCH_NOP(dev)) 2390 return; 2391 2392 I915_WRITE(SDEIMR, 0xffffffff); 2393 I915_WRITE(SDEIER, 0x0); 2394 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2395 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 2396 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2397 } 2398 2399 static void i8xx_irq_preinstall(struct drm_device * dev) 2400 { 2401 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2402 int pipe; 2403 2404 atomic_set(&dev_priv->irq_received, 0); 2405 2406 for_each_pipe(pipe) 2407 I915_WRITE(PIPESTAT(pipe), 0); 2408 I915_WRITE16(IMR, 0xffff); 2409 I915_WRITE16(IER, 0x0); 2410 POSTING_READ16(IER); 2411 } 2412 2413 static int i8xx_irq_postinstall(struct drm_device *dev) 2414 { 2415 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2416 2417 I915_WRITE16(EMR, 2418 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2419 2420 /* Unmask the interrupts that we always want 
on. */ 2421 dev_priv->irq_mask = 2422 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2423 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2424 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2425 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2426 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2427 I915_WRITE16(IMR, dev_priv->irq_mask); 2428 2429 I915_WRITE16(IER, 2430 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2431 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2432 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2433 I915_USER_INTERRUPT); 2434 POSTING_READ16(IER); 2435 2436 return 0; 2437 } 2438 2439 /* 2440 * Returns true when a page flip has completed. 2441 */ 2442 static bool i8xx_handle_vblank(struct drm_device *dev, 2443 int pipe, u16 iir) 2444 { 2445 drm_i915_private_t *dev_priv = dev->dev_private; 2446 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 2447 2448 if (!drm_handle_vblank(dev, pipe)) 2449 return false; 2450 2451 if ((iir & flip_pending) == 0) 2452 return false; 2453 2454 intel_prepare_page_flip(dev, pipe); 2455 2456 /* We detect FlipDone by looking for the change in PendingFlip from '1' 2457 * to '0' on the following vblank, i.e. IIR has the Pendingflip 2458 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 2459 * the flip is completed (no longer pending). Since this doesn't raise 2460 * an interrupt per se, we watch for the change at vblank. 2461 */ 2462 if (I915_READ16(ISR) & flip_pending) 2463 return false; 2464 2465 intel_finish_page_flip(dev, pipe); 2466 2467 return true; 2468 } 2469 2470 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2471 { 2472 struct drm_device *dev = (struct drm_device *) arg; 2473 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2474 u16 iir, new_iir; 2475 u32 pipe_stats[2]; 2476 unsigned long irqflags; 2477 int pipe; 2478 u16 flip_mask = 2479 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2480 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2481 2482 atomic_inc(&dev_priv->irq_received); 2483 2484 iir = I915_READ16(IIR); 2485 if (iir == 0) 2486 return IRQ_NONE; 2487 2488 while (iir & ~flip_mask) { 2489 /* Can't rely on pipestat interrupt bit in iir as it might 2490 * have been cleared after the pipestat interrupt was received. 2491 * It doesn't set the bit in iir again, but it still produces 2492 * interrupts (for non-MSI). 
2493 */ 2494 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2495 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2496 i915_handle_error(dev, false); 2497 2498 for_each_pipe(pipe) { 2499 int reg = PIPESTAT(pipe); 2500 pipe_stats[pipe] = I915_READ(reg); 2501 2502 /* 2503 * Clear the PIPE*STAT regs before the IIR 2504 */ 2505 if (pipe_stats[pipe] & 0x8000ffff) { 2506 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2507 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2508 pipe_name(pipe)); 2509 I915_WRITE(reg, pipe_stats[pipe]); 2510 } 2511 } 2512 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2513 2514 I915_WRITE16(IIR, iir & ~flip_mask); 2515 new_iir = I915_READ16(IIR); /* Flush posted writes */ 2516 2517 i915_update_dri1_breadcrumb(dev); 2518 2519 if (iir & I915_USER_INTERRUPT) 2520 notify_ring(dev, &dev_priv->ring[RCS]); 2521 2522 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 2523 i8xx_handle_vblank(dev, 0, iir)) 2524 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 2525 2526 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 2527 i8xx_handle_vblank(dev, 1, iir)) 2528 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 2529 2530 iir = new_iir; 2531 } 2532 2533 return IRQ_HANDLED; 2534 } 2535 2536 static void i8xx_irq_uninstall(struct drm_device * dev) 2537 { 2538 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2539 int pipe; 2540 2541 for_each_pipe(pipe) { 2542 /* Clear enable bits; then clear status bits */ 2543 I915_WRITE(PIPESTAT(pipe), 0); 2544 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2545 } 2546 I915_WRITE16(IMR, 0xffff); 2547 I915_WRITE16(IER, 0x0); 2548 I915_WRITE16(IIR, I915_READ16(IIR)); 2549 } 2550 2551 static void i915_irq_preinstall(struct drm_device * dev) 2552 { 2553 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2554 int pipe; 2555 2556 atomic_set(&dev_priv->irq_received, 0); 2557 2558 if (I915_HAS_HOTPLUG(dev)) { 2559 I915_WRITE(PORT_HOTPLUG_EN, 0); 2560 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2561 } 2562 2563 I915_WRITE16(HWSTAM, 0xeffe); 2564 for_each_pipe(pipe) 2565 I915_WRITE(PIPESTAT(pipe), 0); 2566 I915_WRITE(IMR, 0xffffffff); 2567 I915_WRITE(IER, 0x0); 2568 POSTING_READ(IER); 2569 } 2570 2571 static int i915_irq_postinstall(struct drm_device *dev) 2572 { 2573 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2574 u32 enable_mask; 2575 2576 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2577 2578 /* Unmask the interrupts that we always want on. */ 2579 dev_priv->irq_mask = 2580 ~(I915_ASLE_INTERRUPT | 2581 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2582 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2583 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2584 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2585 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2586 2587 enable_mask = 2588 I915_ASLE_INTERRUPT | 2589 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2590 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2591 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2592 I915_USER_INTERRUPT; 2593 2594 if (I915_HAS_HOTPLUG(dev)) { 2595 I915_WRITE(PORT_HOTPLUG_EN, 0); 2596 POSTING_READ(PORT_HOTPLUG_EN); 2597 2598 /* Enable in IER... 
*/ 2599 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2600 /* and unmask in IMR */ 2601 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2602 } 2603 2604 I915_WRITE(IMR, dev_priv->irq_mask); 2605 I915_WRITE(IER, enable_mask); 2606 POSTING_READ(IER); 2607 2608 i915_enable_asle_pipestat(dev); 2609 2610 return 0; 2611 } 2612 2613 /* 2614 * Returns true when a page flip has completed. 2615 */ 2616 static bool i915_handle_vblank(struct drm_device *dev, 2617 int plane, int pipe, u32 iir) 2618 { 2619 drm_i915_private_t *dev_priv = dev->dev_private; 2620 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 2621 2622 if (!drm_handle_vblank(dev, pipe)) 2623 return false; 2624 2625 if ((iir & flip_pending) == 0) 2626 return false; 2627 2628 intel_prepare_page_flip(dev, plane); 2629 2630 /* We detect FlipDone by looking for the change in PendingFlip from '1' 2631 * to '0' on the following vblank, i.e. IIR has the Pendingflip 2632 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 2633 * the flip is completed (no longer pending). Since this doesn't raise 2634 * an interrupt per se, we watch for the change at vblank. 2635 */ 2636 if (I915_READ(ISR) & flip_pending) 2637 return false; 2638 2639 intel_finish_page_flip(dev, pipe); 2640 2641 return true; 2642 } 2643 2644 static irqreturn_t i915_irq_handler(int irq, void *arg) 2645 { 2646 struct drm_device *dev = (struct drm_device *) arg; 2647 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2648 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2649 unsigned long irqflags; 2650 u32 flip_mask = 2651 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2652 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2653 int pipe, ret = IRQ_NONE; 2654 2655 atomic_inc(&dev_priv->irq_received); 2656 2657 iir = I915_READ(IIR); 2658 do { 2659 bool irq_received = (iir & ~flip_mask) != 0; 2660 bool blc_event = false; 2661 2662 /* Can't rely on pipestat interrupt bit in iir as it might 2663 * have been cleared after the pipestat interrupt was received. 2664 * It doesn't set the bit in iir again, but it still produces 2665 * interrupts (for non-MSI). 2666 */ 2667 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2668 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2669 i915_handle_error(dev, false); 2670 2671 for_each_pipe(pipe) { 2672 int reg = PIPESTAT(pipe); 2673 pipe_stats[pipe] = I915_READ(reg); 2674 2675 /* Clear the PIPE*STAT regs before the IIR */ 2676 if (pipe_stats[pipe] & 0x8000ffff) { 2677 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2678 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2679 pipe_name(pipe)); 2680 I915_WRITE(reg, pipe_stats[pipe]); 2681 irq_received = true; 2682 } 2683 } 2684 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2685 2686 if (!irq_received) 2687 break; 2688 2689 /* Consume port. 
Then clear IIR or we'll miss events */ 2690 if ((I915_HAS_HOTPLUG(dev)) && 2691 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2692 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2693 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2694 2695 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2696 hotplug_status); 2697 2698 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 2699 2700 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2701 POSTING_READ(PORT_HOTPLUG_STAT); 2702 } 2703 2704 I915_WRITE(IIR, iir & ~flip_mask); 2705 new_iir = I915_READ(IIR); /* Flush posted writes */ 2706 2707 if (iir & I915_USER_INTERRUPT) 2708 notify_ring(dev, &dev_priv->ring[RCS]); 2709 2710 for_each_pipe(pipe) { 2711 int plane = pipe; 2712 if (IS_MOBILE(dev)) 2713 plane = !plane; 2714 2715 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 2716 i915_handle_vblank(dev, plane, pipe, iir)) 2717 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 2718 2719 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2720 blc_event = true; 2721 } 2722 2723 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2724 intel_opregion_asle_intr(dev); 2725 2726 /* With MSI, interrupts are only generated when iir 2727 * transitions from zero to nonzero. If another bit got 2728 * set while we were handling the existing iir bits, then 2729 * we would never get another interrupt. 2730 * 2731 * This is fine on non-MSI as well, as if we hit this path 2732 * we avoid exiting the interrupt handler only to generate 2733 * another one. 2734 * 2735 * Note that for MSI this could cause a stray interrupt report 2736 * if an interrupt landed in the time between writing IIR and 2737 * the posting read. This should be rare enough to never 2738 * trigger the 99% of 100,000 interrupts test for disabling 2739 * stray interrupts. 2740 */ 2741 ret = IRQ_HANDLED; 2742 iir = new_iir; 2743 } while (iir & ~flip_mask); 2744 2745 i915_update_dri1_breadcrumb(dev); 2746 2747 return ret; 2748 } 2749 2750 static void i915_irq_uninstall(struct drm_device * dev) 2751 { 2752 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2753 int pipe; 2754 2755 del_timer_sync(&dev_priv->hotplug_reenable_timer); 2756 2757 if (I915_HAS_HOTPLUG(dev)) { 2758 I915_WRITE(PORT_HOTPLUG_EN, 0); 2759 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2760 } 2761 2762 I915_WRITE16(HWSTAM, 0xffff); 2763 for_each_pipe(pipe) { 2764 /* Clear enable bits; then clear status bits */ 2765 I915_WRITE(PIPESTAT(pipe), 0); 2766 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2767 } 2768 I915_WRITE(IMR, 0xffffffff); 2769 I915_WRITE(IER, 0x0); 2770 2771 I915_WRITE(IIR, I915_READ(IIR)); 2772 } 2773 2774 static void i965_irq_preinstall(struct drm_device * dev) 2775 { 2776 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2777 int pipe; 2778 2779 atomic_set(&dev_priv->irq_received, 0); 2780 2781 I915_WRITE(PORT_HOTPLUG_EN, 0); 2782 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2783 2784 I915_WRITE(HWSTAM, 0xeffe); 2785 for_each_pipe(pipe) 2786 I915_WRITE(PIPESTAT(pipe), 0); 2787 I915_WRITE(IMR, 0xffffffff); 2788 I915_WRITE(IER, 0x0); 2789 POSTING_READ(IER); 2790 } 2791 2792 static int i965_irq_postinstall(struct drm_device *dev) 2793 { 2794 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2795 u32 enable_mask; 2796 u32 error_mask; 2797 unsigned long irqflags; 2798 2799 /* Unmask the interrupts that we always want on. 
*/ 2800 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2801 I915_DISPLAY_PORT_INTERRUPT | 2802 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2803 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2804 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2805 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2806 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2807 2808 enable_mask = ~dev_priv->irq_mask; 2809 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2810 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 2811 enable_mask |= I915_USER_INTERRUPT; 2812 2813 if (IS_G4X(dev)) 2814 enable_mask |= I915_BSD_USER_INTERRUPT; 2815 2816 /* Interrupt setup is already guaranteed to be single-threaded, this is 2817 * just to make the assert_spin_locked check happy. */ 2818 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2819 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2820 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2821 2822 /* 2823 * Enable some error detection, note the instruction error mask 2824 * bit is reserved, so we leave it masked. 2825 */ 2826 if (IS_G4X(dev)) { 2827 error_mask = ~(GM45_ERROR_PAGE_TABLE | 2828 GM45_ERROR_MEM_PRIV | 2829 GM45_ERROR_CP_PRIV | 2830 I915_ERROR_MEMORY_REFRESH); 2831 } else { 2832 error_mask = ~(I915_ERROR_PAGE_TABLE | 2833 I915_ERROR_MEMORY_REFRESH); 2834 } 2835 I915_WRITE(EMR, error_mask); 2836 2837 I915_WRITE(IMR, dev_priv->irq_mask); 2838 I915_WRITE(IER, enable_mask); 2839 POSTING_READ(IER); 2840 2841 I915_WRITE(PORT_HOTPLUG_EN, 0); 2842 POSTING_READ(PORT_HOTPLUG_EN); 2843 2844 i915_enable_asle_pipestat(dev); 2845 2846 return 0; 2847 } 2848 2849 static void i915_hpd_irq_setup(struct drm_device *dev) 2850 { 2851 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2852 struct drm_mode_config *mode_config = &dev->mode_config; 2853 struct intel_encoder *intel_encoder; 2854 u32 hotplug_en; 2855 2856 assert_spin_locked(&dev_priv->irq_lock); 2857 2858 if (I915_HAS_HOTPLUG(dev)) { 2859 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2860 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 2861 /* Note HDMI and DP share hotplug bits */ 2862 /* enable bits are the same for all generations */ 2863 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2864 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2865 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 2866 /* Programming the CRT detection parameters tends 2867 to generate a spurious hotplug event about three 2868 seconds later. So just do it once. 
2869 */ 2870 if (IS_G4X(dev)) 2871 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 2872 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 2873 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2874 2875 /* Ignore TV since it's buggy */ 2876 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2877 } 2878 } 2879 2880 static irqreturn_t i965_irq_handler(int irq, void *arg) 2881 { 2882 struct drm_device *dev = (struct drm_device *) arg; 2883 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2884 u32 iir, new_iir; 2885 u32 pipe_stats[I915_MAX_PIPES]; 2886 unsigned long irqflags; 2887 int irq_received; 2888 int ret = IRQ_NONE, pipe; 2889 u32 flip_mask = 2890 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2891 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2892 2893 atomic_inc(&dev_priv->irq_received); 2894 2895 iir = I915_READ(IIR); 2896 2897 for (;;) { 2898 bool blc_event = false; 2899 2900 irq_received = (iir & ~flip_mask) != 0; 2901 2902 /* Can't rely on pipestat interrupt bit in iir as it might 2903 * have been cleared after the pipestat interrupt was received. 2904 * It doesn't set the bit in iir again, but it still produces 2905 * interrupts (for non-MSI). 2906 */ 2907 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2908 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2909 i915_handle_error(dev, false); 2910 2911 for_each_pipe(pipe) { 2912 int reg = PIPESTAT(pipe); 2913 pipe_stats[pipe] = I915_READ(reg); 2914 2915 /* 2916 * Clear the PIPE*STAT regs before the IIR 2917 */ 2918 if (pipe_stats[pipe] & 0x8000ffff) { 2919 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2920 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2921 pipe_name(pipe)); 2922 I915_WRITE(reg, pipe_stats[pipe]); 2923 irq_received = 1; 2924 } 2925 } 2926 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2927 2928 if (!irq_received) 2929 break; 2930 2931 ret = IRQ_HANDLED; 2932 2933 /* Consume port. Then clear IIR or we'll miss events */ 2934 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 2935 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2936 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 2937 HOTPLUG_INT_STATUS_G4X : 2938 HOTPLUG_INT_STATUS_I915); 2939 2940 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2941 hotplug_status); 2942 2943 intel_hpd_irq_handler(dev, hotplug_trigger, 2944 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); 2945 2946 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2947 I915_READ(PORT_HOTPLUG_STAT); 2948 } 2949 2950 I915_WRITE(IIR, iir & ~flip_mask); 2951 new_iir = I915_READ(IIR); /* Flush posted writes */ 2952 2953 if (iir & I915_USER_INTERRUPT) 2954 notify_ring(dev, &dev_priv->ring[RCS]); 2955 if (iir & I915_BSD_USER_INTERRUPT) 2956 notify_ring(dev, &dev_priv->ring[VCS]); 2957 2958 for_each_pipe(pipe) { 2959 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 2960 i915_handle_vblank(dev, pipe, pipe, iir)) 2961 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 2962 2963 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2964 blc_event = true; 2965 } 2966 2967 2968 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2969 intel_opregion_asle_intr(dev); 2970 2971 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2972 gmbus_irq_handler(dev); 2973 2974 /* With MSI, interrupts are only generated when iir 2975 * transitions from zero to nonzero. If another bit got 2976 * set while we were handling the existing iir bits, then 2977 * we would never get another interrupt. 
2978 * 2979 * This is fine on non-MSI as well, as if we hit this path 2980 * we avoid exiting the interrupt handler only to generate 2981 * another one. 2982 * 2983 * Note that for MSI this could cause a stray interrupt report 2984 * if an interrupt landed in the time between writing IIR and 2985 * the posting read. This should be rare enough to never 2986 * trigger the 99% of 100,000 interrupts test for disabling 2987 * stray interrupts. 2988 */ 2989 iir = new_iir; 2990 } 2991 2992 i915_update_dri1_breadcrumb(dev); 2993 2994 return ret; 2995 } 2996 2997 static void i965_irq_uninstall(struct drm_device * dev) 2998 { 2999 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3000 int pipe; 3001 3002 if (!dev_priv) 3003 return; 3004 3005 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3006 3007 I915_WRITE(PORT_HOTPLUG_EN, 0); 3008 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3009 3010 I915_WRITE(HWSTAM, 0xffffffff); 3011 for_each_pipe(pipe) 3012 I915_WRITE(PIPESTAT(pipe), 0); 3013 I915_WRITE(IMR, 0xffffffff); 3014 I915_WRITE(IER, 0x0); 3015 3016 for_each_pipe(pipe) 3017 I915_WRITE(PIPESTAT(pipe), 3018 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 3019 I915_WRITE(IIR, I915_READ(IIR)); 3020 } 3021 3022 static void i915_reenable_hotplug_timer_func(unsigned long data) 3023 { 3024 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3025 struct drm_device *dev = dev_priv->dev; 3026 struct drm_mode_config *mode_config = &dev->mode_config; 3027 unsigned long irqflags; 3028 int i; 3029 3030 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3031 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 3032 struct drm_connector *connector; 3033 3034 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 3035 continue; 3036 3037 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3038 3039 list_for_each_entry(connector, &mode_config->connector_list, head) { 3040 struct intel_connector *intel_connector = to_intel_connector(connector); 3041 3042 if (intel_connector->encoder->hpd_pin == i) { 3043 if (connector->polled != intel_connector->polled) 3044 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 3045 drm_get_connector_name(connector)); 3046 connector->polled = intel_connector->polled; 3047 if (!connector->polled) 3048 connector->polled = DRM_CONNECTOR_POLL_HPD; 3049 } 3050 } 3051 } 3052 if (dev_priv->display.hpd_irq_setup) 3053 dev_priv->display.hpd_irq_setup(dev); 3054 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3055 } 3056 3057 void intel_irq_init(struct drm_device *dev) 3058 { 3059 struct drm_i915_private *dev_priv = dev->dev_private; 3060 3061 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 3062 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 3063 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 3064 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 3065 3066 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 3067 i915_hangcheck_elapsed, 3068 (unsigned long) dev); 3069 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 3070 (unsigned long) dev_priv); 3071 3072 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 3073 3074 dev->driver->get_vblank_counter = i915_get_vblank_counter; 3075 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 3076 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3077 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3078 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3079 } 3080 3081 if 
(drm_core_check_feature(dev, DRIVER_MODESET)) 3082 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3083 else 3084 dev->driver->get_vblank_timestamp = NULL; 3085 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 3086 3087 if (IS_VALLEYVIEW(dev)) { 3088 dev->driver->irq_handler = valleyview_irq_handler; 3089 dev->driver->irq_preinstall = valleyview_irq_preinstall; 3090 dev->driver->irq_postinstall = valleyview_irq_postinstall; 3091 dev->driver->irq_uninstall = valleyview_irq_uninstall; 3092 dev->driver->enable_vblank = valleyview_enable_vblank; 3093 dev->driver->disable_vblank = valleyview_disable_vblank; 3094 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3095 } else if (HAS_PCH_SPLIT(dev)) { 3096 dev->driver->irq_handler = ironlake_irq_handler; 3097 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3098 dev->driver->irq_postinstall = ironlake_irq_postinstall; 3099 dev->driver->irq_uninstall = ironlake_irq_uninstall; 3100 dev->driver->enable_vblank = ironlake_enable_vblank; 3101 dev->driver->disable_vblank = ironlake_disable_vblank; 3102 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3103 } else { 3104 if (INTEL_INFO(dev)->gen == 2) { 3105 dev->driver->irq_preinstall = i8xx_irq_preinstall; 3106 dev->driver->irq_postinstall = i8xx_irq_postinstall; 3107 dev->driver->irq_handler = i8xx_irq_handler; 3108 dev->driver->irq_uninstall = i8xx_irq_uninstall; 3109 } else if (INTEL_INFO(dev)->gen == 3) { 3110 dev->driver->irq_preinstall = i915_irq_preinstall; 3111 dev->driver->irq_postinstall = i915_irq_postinstall; 3112 dev->driver->irq_uninstall = i915_irq_uninstall; 3113 dev->driver->irq_handler = i915_irq_handler; 3114 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3115 } else { 3116 dev->driver->irq_preinstall = i965_irq_preinstall; 3117 dev->driver->irq_postinstall = i965_irq_postinstall; 3118 dev->driver->irq_uninstall = i965_irq_uninstall; 3119 dev->driver->irq_handler = i965_irq_handler; 3120 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3121 } 3122 dev->driver->enable_vblank = i915_enable_vblank; 3123 dev->driver->disable_vblank = i915_disable_vblank; 3124 } 3125 } 3126 3127 void intel_hpd_init(struct drm_device *dev) 3128 { 3129 struct drm_i915_private *dev_priv = dev->dev_private; 3130 struct drm_mode_config *mode_config = &dev->mode_config; 3131 struct drm_connector *connector; 3132 unsigned long irqflags; 3133 int i; 3134 3135 for (i = 1; i < HPD_NUM_PINS; i++) { 3136 dev_priv->hpd_stats[i].hpd_cnt = 0; 3137 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3138 } 3139 list_for_each_entry(connector, &mode_config->connector_list, head) { 3140 struct intel_connector *intel_connector = to_intel_connector(connector); 3141 connector->polled = intel_connector->polled; 3142 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3143 connector->polled = DRM_CONNECTOR_POLL_HPD; 3144 } 3145 3146 /* Interrupt setup is already guaranteed to be single-threaded, this is 3147 * just to make the assert_spin_locked checks happy. */ 3148 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3149 if (dev_priv->display.hpd_irq_setup) 3150 dev_priv->display.hpd_irq_setup(dev); 3151 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3152 } 3153 3154 /* Disable interrupts so we can allow Package C8+. 
*/ 3155 void hsw_pc8_disable_interrupts(struct drm_device *dev) 3156 { 3157 struct drm_i915_private *dev_priv = dev->dev_private; 3158 unsigned long irqflags; 3159 3160 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3161 3162 dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); 3163 dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); 3164 dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); 3165 dev_priv->pc8.regsave.gtier = I915_READ(GTIER); 3166 dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); 3167 3168 ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB); 3169 ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT); 3170 ilk_disable_gt_irq(dev_priv, 0xffffffff); 3171 snb_disable_pm_irq(dev_priv, 0xffffffff); 3172 3173 dev_priv->pc8.irqs_disabled = true; 3174 3175 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3176 } 3177 3178 /* Restore interrupts so we can recover from Package C8+. */ 3179 void hsw_pc8_restore_interrupts(struct drm_device *dev) 3180 { 3181 struct drm_i915_private *dev_priv = dev->dev_private; 3182 unsigned long irqflags; 3183 uint32_t val, expected; 3184 3185 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3186 3187 val = I915_READ(DEIMR); 3188 expected = ~DE_PCH_EVENT_IVB; 3189 WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected); 3190 3191 val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT; 3192 expected = ~SDE_HOTPLUG_MASK_CPT; 3193 WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n", 3194 val, expected); 3195 3196 val = I915_READ(GTIMR); 3197 expected = 0xffffffff; 3198 WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected); 3199 3200 val = I915_READ(GEN6_PMIMR); 3201 expected = 0xffffffff; 3202 WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val, 3203 expected); 3204 3205 dev_priv->pc8.irqs_disabled = false; 3206 3207 ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); 3208 ibx_enable_display_interrupt(dev_priv, 3209 ~dev_priv->pc8.regsave.sdeimr & 3210 ~SDE_HOTPLUG_MASK_CPT); 3211 ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); 3212 snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); 3213 I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); 3214 3215 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3216 } 3217