1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <drm/drmP.h> 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include "intel_drv.h" 38 39 static const u32 hpd_ibx[] = { 40 [HPD_CRT] = SDE_CRT_HOTPLUG, 41 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 42 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 43 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 44 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 45 }; 46 47 static const u32 hpd_cpt[] = { 48 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 49 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 50 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 51 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 52 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 53 }; 54 55 static const u32 hpd_mask_i915[] = { 56 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 57 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 58 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 59 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 60 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 61 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 62 }; 63 64 static const u32 hpd_status_gen4[] = { 65 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 66 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 67 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 68 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 69 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 71 }; 72 73 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ 74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 76 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 77 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 78 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 80 }; 81 82 /* For display hotplug interrupt */ 83 static void 84 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 85 { 86 assert_spin_locked(&dev_priv->irq_lock); 87 88 if ((dev_priv->irq_mask & mask) != 0) { 89 dev_priv->irq_mask &= ~mask; 90 I915_WRITE(DEIMR, dev_priv->irq_mask); 91 POSTING_READ(DEIMR); 92 } 93 } 94 95 static void 96 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 97 { 98 assert_spin_locked(&dev_priv->irq_lock); 99 100 if ((dev_priv->irq_mask & mask) != mask) { 101 dev_priv->irq_mask |= mask; 102 
I915_WRITE(DEIMR, dev_priv->irq_mask); 103 POSTING_READ(DEIMR); 104 } 105 } 106 107 static bool ivb_can_enable_err_int(struct drm_device *dev) 108 { 109 struct drm_i915_private *dev_priv = dev->dev_private; 110 struct intel_crtc *crtc; 111 enum pipe pipe; 112 113 assert_spin_locked(&dev_priv->irq_lock); 114 115 for_each_pipe(pipe) { 116 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 117 118 if (crtc->cpu_fifo_underrun_disabled) 119 return false; 120 } 121 122 return true; 123 } 124 125 static bool cpt_can_enable_serr_int(struct drm_device *dev) 126 { 127 struct drm_i915_private *dev_priv = dev->dev_private; 128 enum pipe pipe; 129 struct intel_crtc *crtc; 130 131 for_each_pipe(pipe) { 132 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 133 134 if (crtc->pch_fifo_underrun_disabled) 135 return false; 136 } 137 138 return true; 139 } 140 141 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 142 enum pipe pipe, bool enable) 143 { 144 struct drm_i915_private *dev_priv = dev->dev_private; 145 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : 146 DE_PIPEB_FIFO_UNDERRUN; 147 148 if (enable) 149 ironlake_enable_display_irq(dev_priv, bit); 150 else 151 ironlake_disable_display_irq(dev_priv, bit); 152 } 153 154 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 155 bool enable) 156 { 157 struct drm_i915_private *dev_priv = dev->dev_private; 158 159 if (enable) { 160 if (!ivb_can_enable_err_int(dev)) 161 return; 162 163 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A | 164 ERR_INT_FIFO_UNDERRUN_B | 165 ERR_INT_FIFO_UNDERRUN_C); 166 167 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 168 } else { 169 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 170 } 171 } 172 173 static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc, 174 bool enable) 175 { 176 struct drm_device *dev = crtc->base.dev; 177 struct drm_i915_private *dev_priv = dev->dev_private; 178 uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER : 179 SDE_TRANSB_FIFO_UNDER; 180 181 if (enable) 182 I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit); 183 else 184 I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit); 185 186 POSTING_READ(SDEIMR); 187 } 188 189 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, 190 enum transcoder pch_transcoder, 191 bool enable) 192 { 193 struct drm_i915_private *dev_priv = dev->dev_private; 194 195 if (enable) { 196 if (!cpt_can_enable_serr_int(dev)) 197 return; 198 199 I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN | 200 SERR_INT_TRANS_B_FIFO_UNDERRUN | 201 SERR_INT_TRANS_C_FIFO_UNDERRUN); 202 203 I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT); 204 } else { 205 I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT); 206 } 207 208 POSTING_READ(SDEIMR); 209 } 210 211 /** 212 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages 213 * @dev: drm device 214 * @pipe: pipe 215 * @enable: true if we want to report FIFO underrun errors, false otherwise 216 * 217 * This function makes us disable or enable CPU fifo underruns for a specific 218 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun 219 * reporting for one pipe may also disable all the other CPU error interruts for 220 * the other pipes, due to the fact that there's just one interrupt mask/enable 221 * bit for all the pipes. 222 * 223 * Returns the previous state of underrun reporting. 
224 */ 225 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 226 enum pipe pipe, bool enable) 227 { 228 struct drm_i915_private *dev_priv = dev->dev_private; 229 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 230 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 231 unsigned long flags; 232 bool ret; 233 234 spin_lock_irqsave(&dev_priv->irq_lock, flags); 235 236 ret = !intel_crtc->cpu_fifo_underrun_disabled; 237 238 if (enable == ret) 239 goto done; 240 241 intel_crtc->cpu_fifo_underrun_disabled = !enable; 242 243 if (IS_GEN5(dev) || IS_GEN6(dev)) 244 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 245 else if (IS_GEN7(dev)) 246 ivybridge_set_fifo_underrun_reporting(dev, enable); 247 248 done: 249 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 250 return ret; 251 } 252 253 /** 254 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages 255 * @dev: drm device 256 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) 257 * @enable: true if we want to report FIFO underrun errors, false otherwise 258 * 259 * This function makes us disable or enable PCH fifo underruns for a specific 260 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO 261 * underrun reporting for one transcoder may also disable all the other PCH 262 * error interruts for the other transcoders, due to the fact that there's just 263 * one interrupt mask/enable bit for all the transcoders. 264 * 265 * Returns the previous state of underrun reporting. 266 */ 267 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 268 enum transcoder pch_transcoder, 269 bool enable) 270 { 271 struct drm_i915_private *dev_priv = dev->dev_private; 272 enum pipe p; 273 struct drm_crtc *crtc; 274 struct intel_crtc *intel_crtc; 275 unsigned long flags; 276 bool ret; 277 278 if (HAS_PCH_LPT(dev)) { 279 crtc = NULL; 280 for_each_pipe(p) { 281 struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p]; 282 if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) { 283 crtc = c; 284 break; 285 } 286 } 287 if (!crtc) { 288 DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n"); 289 return false; 290 } 291 } else { 292 crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; 293 } 294 intel_crtc = to_intel_crtc(crtc); 295 296 spin_lock_irqsave(&dev_priv->irq_lock, flags); 297 298 ret = !intel_crtc->pch_fifo_underrun_disabled; 299 300 if (enable == ret) 301 goto done; 302 303 intel_crtc->pch_fifo_underrun_disabled = !enable; 304 305 if (HAS_PCH_IBX(dev)) 306 ibx_set_fifo_underrun_reporting(intel_crtc, enable); 307 else 308 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 309 310 done: 311 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 312 return ret; 313 } 314 315 316 void 317 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 318 { 319 u32 reg = PIPESTAT(pipe); 320 u32 pipestat = I915_READ(reg) & 0x7fff0000; 321 322 if ((pipestat & mask) == mask) 323 return; 324 325 /* Enable the interrupt, clear any pending status */ 326 pipestat |= mask | (mask >> 16); 327 I915_WRITE(reg, pipestat); 328 POSTING_READ(reg); 329 } 330 331 void 332 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 333 { 334 u32 reg = PIPESTAT(pipe); 335 u32 pipestat = I915_READ(reg) & 0x7fff0000; 336 337 if ((pipestat & mask) == 0) 338 return; 339 340 pipestat &= ~mask; 341 I915_WRITE(reg, pipestat); 342 POSTING_READ(reg); 343 } 344 345 /** 346 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 347 
*/ 348 static void i915_enable_asle_pipestat(struct drm_device *dev) 349 { 350 drm_i915_private_t *dev_priv = dev->dev_private; 351 unsigned long irqflags; 352 353 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 354 return; 355 356 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 357 358 i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); 359 if (INTEL_INFO(dev)->gen >= 4) 360 i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); 361 362 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 363 } 364 365 /** 366 * i915_pipe_enabled - check if a pipe is enabled 367 * @dev: DRM device 368 * @pipe: pipe to check 369 * 370 * Reading certain registers when the pipe is disabled can hang the chip. 371 * Use this routine to make sure the PLL is running and the pipe is active 372 * before reading such registers if unsure. 373 */ 374 static int 375 i915_pipe_enabled(struct drm_device *dev, int pipe) 376 { 377 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 378 379 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 380 /* Locking is horribly broken here, but whatever. */ 381 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 382 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 383 384 return intel_crtc->active; 385 } else { 386 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 387 } 388 } 389 390 /* Called from drm generic code, passed a 'crtc', which 391 * we use as a pipe index 392 */ 393 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 394 { 395 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 396 unsigned long high_frame; 397 unsigned long low_frame; 398 u32 high1, high2, low; 399 400 if (!i915_pipe_enabled(dev, pipe)) { 401 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 402 "pipe %c\n", pipe_name(pipe)); 403 return 0; 404 } 405 406 high_frame = PIPEFRAME(pipe); 407 low_frame = PIPEFRAMEPIXEL(pipe); 408 409 /* 410 * High & low register fields aren't synchronized, so make sure 411 * we get a low value that's stable across two reads of the high 412 * register. 413 */ 414 do { 415 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 416 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; 417 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 418 } while (high1 != high2); 419 420 high1 >>= PIPE_FRAME_HIGH_SHIFT; 421 low >>= PIPE_FRAME_LOW_SHIFT; 422 return (high1 << 8) | low; 423 } 424 425 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 426 { 427 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 428 int reg = PIPE_FRMCOUNT_GM45(pipe); 429 430 if (!i915_pipe_enabled(dev, pipe)) { 431 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 432 "pipe %c\n", pipe_name(pipe)); 433 return 0; 434 } 435 436 return I915_READ(reg); 437 } 438 439 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 440 int *vpos, int *hpos) 441 { 442 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 443 u32 vbl = 0, position = 0; 444 int vbl_start, vbl_end, htotal, vtotal; 445 bool in_vbl = true; 446 int ret = 0; 447 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 448 pipe); 449 450 if (!i915_pipe_enabled(dev, pipe)) { 451 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 452 "pipe %c\n", pipe_name(pipe)); 453 return 0; 454 } 455 456 /* Get vtotal. 
*/ 457 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 458 459 if (INTEL_INFO(dev)->gen >= 4) { 460 /* No obvious pixelcount register. Only query vertical 461 * scanout position from Display scan line register. 462 */ 463 position = I915_READ(PIPEDSL(pipe)); 464 465 /* Decode into vertical scanout position. Don't have 466 * horizontal scanout position. 467 */ 468 *vpos = position & 0x1fff; 469 *hpos = 0; 470 } else { 471 /* Have access to pixelcount since start of frame. 472 * We can split this into vertical and horizontal 473 * scanout position. 474 */ 475 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 476 477 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 478 *vpos = position / htotal; 479 *hpos = position - (*vpos * htotal); 480 } 481 482 /* Query vblank area. */ 483 vbl = I915_READ(VBLANK(cpu_transcoder)); 484 485 /* Test position against vblank region. */ 486 vbl_start = vbl & 0x1fff; 487 vbl_end = (vbl >> 16) & 0x1fff; 488 489 if ((*vpos < vbl_start) || (*vpos > vbl_end)) 490 in_vbl = false; 491 492 /* Inside "upper part" of vblank area? Apply corrective offset: */ 493 if (in_vbl && (*vpos >= vbl_start)) 494 *vpos = *vpos - vtotal; 495 496 /* Readouts valid? */ 497 if (vbl > 0) 498 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 499 500 /* In vblank? */ 501 if (in_vbl) 502 ret |= DRM_SCANOUTPOS_INVBL; 503 504 return ret; 505 } 506 507 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 508 int *max_error, 509 struct timeval *vblank_time, 510 unsigned flags) 511 { 512 struct drm_crtc *crtc; 513 514 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 515 DRM_ERROR("Invalid crtc %d\n", pipe); 516 return -EINVAL; 517 } 518 519 /* Get drm_crtc to timestamp: */ 520 crtc = intel_get_crtc_for_pipe(dev, pipe); 521 if (crtc == NULL) { 522 DRM_ERROR("Invalid crtc %d\n", pipe); 523 return -EINVAL; 524 } 525 526 if (!crtc->enabled) { 527 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 528 return -EBUSY; 529 } 530 531 /* Helper routine in DRM core does all the work: */ 532 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 533 vblank_time, flags, 534 crtc); 535 } 536 537 static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector) 538 { 539 enum drm_connector_status old_status; 540 541 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 542 old_status = connector->status; 543 544 connector->status = connector->funcs->detect(connector, false); 545 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 546 connector->base.id, 547 drm_get_connector_name(connector), 548 old_status, connector->status); 549 return (old_status != connector->status); 550 } 551 552 /* 553 * Handle hotplug events outside the interrupt handler proper. 554 */ 555 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 556 557 static void i915_hotplug_work_func(struct work_struct *work) 558 { 559 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 560 hotplug_work); 561 struct drm_device *dev = dev_priv->dev; 562 struct drm_mode_config *mode_config = &dev->mode_config; 563 struct intel_connector *intel_connector; 564 struct intel_encoder *intel_encoder; 565 struct drm_connector *connector; 566 unsigned long irqflags; 567 bool hpd_disabled = false; 568 bool changed = false; 569 u32 hpd_event_bits; 570 571 /* HPD irq before everything is fully set up. 
*/ 572 if (!dev_priv->enable_hotplug_processing) 573 return; 574 575 mutex_lock(&mode_config->mutex); 576 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 577 578 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 579 580 hpd_event_bits = dev_priv->hpd_event_bits; 581 dev_priv->hpd_event_bits = 0; 582 list_for_each_entry(connector, &mode_config->connector_list, head) { 583 intel_connector = to_intel_connector(connector); 584 intel_encoder = intel_connector->encoder; 585 if (intel_encoder->hpd_pin > HPD_NONE && 586 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 587 connector->polled == DRM_CONNECTOR_POLL_HPD) { 588 DRM_INFO("HPD interrupt storm detected on connector %s: " 589 "switching from hotplug detection to polling\n", 590 drm_get_connector_name(connector)); 591 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 592 connector->polled = DRM_CONNECTOR_POLL_CONNECT 593 | DRM_CONNECTOR_POLL_DISCONNECT; 594 hpd_disabled = true; 595 } 596 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 597 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 598 drm_get_connector_name(connector), intel_encoder->hpd_pin); 599 } 600 } 601 /* if there were no outputs to poll, poll was disabled, 602 * therefore make sure it's enabled when disabling HPD on 603 * some connectors */ 604 if (hpd_disabled) { 605 drm_kms_helper_poll_enable(dev); 606 mod_timer(&dev_priv->hotplug_reenable_timer, 607 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 608 } 609 610 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 611 612 list_for_each_entry(connector, &mode_config->connector_list, head) { 613 intel_connector = to_intel_connector(connector); 614 intel_encoder = intel_connector->encoder; 615 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 616 if (intel_encoder->hot_plug) 617 intel_encoder->hot_plug(intel_encoder); 618 if (intel_hpd_irq_event(dev, connector)) 619 changed = true; 620 } 621 } 622 mutex_unlock(&mode_config->mutex); 623 624 if (changed) 625 drm_kms_helper_hotplug_event(dev); 626 } 627 628 static void ironlake_handle_rps_change(struct drm_device *dev) 629 { 630 drm_i915_private_t *dev_priv = dev->dev_private; 631 u32 busy_up, busy_down, max_avg, min_avg; 632 u8 new_delay; 633 unsigned long flags; 634 635 spin_lock_irqsave(&mchdev_lock, flags); 636 637 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 638 639 new_delay = dev_priv->ips.cur_delay; 640 641 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 642 busy_up = I915_READ(RCPREVBSYTUPAVG); 643 busy_down = I915_READ(RCPREVBSYTDNAVG); 644 max_avg = I915_READ(RCBMAXAVG); 645 min_avg = I915_READ(RCBMINAVG); 646 647 /* Handle RCS change request from hw */ 648 if (busy_up > max_avg) { 649 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 650 new_delay = dev_priv->ips.cur_delay - 1; 651 if (new_delay < dev_priv->ips.max_delay) 652 new_delay = dev_priv->ips.max_delay; 653 } else if (busy_down < min_avg) { 654 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 655 new_delay = dev_priv->ips.cur_delay + 1; 656 if (new_delay > dev_priv->ips.min_delay) 657 new_delay = dev_priv->ips.min_delay; 658 } 659 660 if (ironlake_set_drps(dev, new_delay)) 661 dev_priv->ips.cur_delay = new_delay; 662 663 spin_unlock_irqrestore(&mchdev_lock, flags); 664 665 return; 666 } 667 668 static void notify_ring(struct drm_device *dev, 669 struct intel_ring_buffer *ring) 670 { 671 struct drm_i915_private *dev_priv = dev->dev_private; 672 673 if (ring->obj == NULL) 674 return; 675 676 
trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 677 678 wake_up_all(&ring->irq_queue); 679 if (i915_enable_hangcheck) { 680 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 681 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 682 } 683 } 684 685 static void gen6_pm_rps_work(struct work_struct *work) 686 { 687 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 688 rps.work); 689 u32 pm_iir, pm_imr; 690 u8 new_delay; 691 692 spin_lock_irq(&dev_priv->rps.lock); 693 pm_iir = dev_priv->rps.pm_iir; 694 dev_priv->rps.pm_iir = 0; 695 pm_imr = I915_READ(GEN6_PMIMR); 696 /* Make sure not to corrupt PMIMR state used by ringbuffer code */ 697 I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS); 698 spin_unlock_irq(&dev_priv->rps.lock); 699 700 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) 701 return; 702 703 mutex_lock(&dev_priv->rps.hw_lock); 704 705 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 706 new_delay = dev_priv->rps.cur_delay + 1; 707 708 /* 709 * For better performance, jump directly 710 * to RPe if we're below it. 711 */ 712 if (IS_VALLEYVIEW(dev_priv->dev) && 713 dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay) 714 new_delay = dev_priv->rps.rpe_delay; 715 } else 716 new_delay = dev_priv->rps.cur_delay - 1; 717 718 /* sysfs frequency interfaces may have snuck in while servicing the 719 * interrupt 720 */ 721 if (new_delay >= dev_priv->rps.min_delay && 722 new_delay <= dev_priv->rps.max_delay) { 723 if (IS_VALLEYVIEW(dev_priv->dev)) 724 valleyview_set_rps(dev_priv->dev, new_delay); 725 else 726 gen6_set_rps(dev_priv->dev, new_delay); 727 } 728 729 if (IS_VALLEYVIEW(dev_priv->dev)) { 730 /* 731 * On VLV, when we enter RC6 we may not be at the minimum 732 * voltage level, so arm a timer to check. It should only 733 * fire when there's activity or once after we've entered 734 * RC6, and then won't be re-armed until the next RPS interrupt. 735 */ 736 mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work, 737 msecs_to_jiffies(100)); 738 } 739 740 mutex_unlock(&dev_priv->rps.hw_lock); 741 } 742 743 744 /** 745 * ivybridge_parity_work - Workqueue called when a parity error interrupt 746 * occurred. 747 * @work: workqueue struct 748 * 749 * Doesn't actually do anything except notify userspace. As a consequence of 750 * this event, userspace should try to remap the bad rows since statistically 751 * it is likely the same row is more likely to go bad again. 752 */ 753 static void ivybridge_parity_work(struct work_struct *work) 754 { 755 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 756 l3_parity.error_work); 757 u32 error_status, row, bank, subbank; 758 char *parity_event[5]; 759 uint32_t misccpctl; 760 unsigned long flags; 761 762 /* We must turn off DOP level clock gating to access the L3 registers. 763 * In order to prevent a get/put style interface, acquire struct mutex 764 * any time we access those registers. 
765 */ 766 mutex_lock(&dev_priv->dev->struct_mutex); 767 768 misccpctl = I915_READ(GEN7_MISCCPCTL); 769 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 770 POSTING_READ(GEN7_MISCCPCTL); 771 772 error_status = I915_READ(GEN7_L3CDERRST1); 773 row = GEN7_PARITY_ERROR_ROW(error_status); 774 bank = GEN7_PARITY_ERROR_BANK(error_status); 775 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 776 777 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 778 GEN7_L3CDERRST1_ENABLE); 779 POSTING_READ(GEN7_L3CDERRST1); 780 781 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 782 783 spin_lock_irqsave(&dev_priv->irq_lock, flags); 784 dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 785 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 786 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 787 788 mutex_unlock(&dev_priv->dev->struct_mutex); 789 790 parity_event[0] = "L3_PARITY_ERROR=1"; 791 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 792 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 793 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 794 parity_event[4] = NULL; 795 796 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 797 KOBJ_CHANGE, parity_event); 798 799 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 800 row, bank, subbank); 801 802 kfree(parity_event[3]); 803 kfree(parity_event[2]); 804 kfree(parity_event[1]); 805 } 806 807 static void ivybridge_handle_parity_error(struct drm_device *dev) 808 { 809 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 810 unsigned long flags; 811 812 if (!HAS_L3_GPU_CACHE(dev)) 813 return; 814 815 spin_lock_irqsave(&dev_priv->irq_lock, flags); 816 dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 817 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 818 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 819 820 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 821 } 822 823 static void snb_gt_irq_handler(struct drm_device *dev, 824 struct drm_i915_private *dev_priv, 825 u32 gt_iir) 826 { 827 828 if (gt_iir & 829 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 830 notify_ring(dev, &dev_priv->ring[RCS]); 831 if (gt_iir & GT_BSD_USER_INTERRUPT) 832 notify_ring(dev, &dev_priv->ring[VCS]); 833 if (gt_iir & GT_BLT_USER_INTERRUPT) 834 notify_ring(dev, &dev_priv->ring[BCS]); 835 836 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 837 GT_BSD_CS_ERROR_INTERRUPT | 838 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 839 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 840 i915_handle_error(dev, false); 841 } 842 843 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 844 ivybridge_handle_parity_error(dev); 845 } 846 847 /* Legacy way of handling PM interrupts */ 848 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, 849 u32 pm_iir) 850 { 851 unsigned long flags; 852 853 /* 854 * IIR bits should never already be set because IMR should 855 * prevent an interrupt from being shown in IIR. The warning 856 * displays a case where we've unsafely cleared 857 * dev_priv->rps.pm_iir. Although missing an interrupt of the same 858 * type is not a problem, it displays a problem in the logic. 859 * 860 * The mask bit in IMR is cleared by dev_priv->rps.work. 
861 */ 862 863 spin_lock_irqsave(&dev_priv->rps.lock, flags); 864 dev_priv->rps.pm_iir |= pm_iir; 865 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); 866 POSTING_READ(GEN6_PMIMR); 867 spin_unlock_irqrestore(&dev_priv->rps.lock, flags); 868 869 queue_work(dev_priv->wq, &dev_priv->rps.work); 870 } 871 872 #define HPD_STORM_DETECT_PERIOD 1000 873 #define HPD_STORM_THRESHOLD 5 874 875 static inline void intel_hpd_irq_handler(struct drm_device *dev, 876 u32 hotplug_trigger, 877 const u32 *hpd) 878 { 879 drm_i915_private_t *dev_priv = dev->dev_private; 880 int i; 881 bool storm_detected = false; 882 883 if (!hotplug_trigger) 884 return; 885 886 spin_lock(&dev_priv->irq_lock); 887 for (i = 1; i < HPD_NUM_PINS; i++) { 888 889 if (!(hpd[i] & hotplug_trigger) || 890 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 891 continue; 892 893 dev_priv->hpd_event_bits |= (1 << i); 894 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 895 dev_priv->hpd_stats[i].hpd_last_jiffies 896 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 897 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 898 dev_priv->hpd_stats[i].hpd_cnt = 0; 899 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 900 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 901 dev_priv->hpd_event_bits &= ~(1 << i); 902 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 903 storm_detected = true; 904 } else { 905 dev_priv->hpd_stats[i].hpd_cnt++; 906 } 907 } 908 909 if (storm_detected) 910 dev_priv->display.hpd_irq_setup(dev); 911 spin_unlock(&dev_priv->irq_lock); 912 913 queue_work(dev_priv->wq, 914 &dev_priv->hotplug_work); 915 } 916 917 static void gmbus_irq_handler(struct drm_device *dev) 918 { 919 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 920 921 wake_up_all(&dev_priv->gmbus_wait_queue); 922 } 923 924 static void dp_aux_irq_handler(struct drm_device *dev) 925 { 926 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 927 928 wake_up_all(&dev_priv->gmbus_wait_queue); 929 } 930 931 /* Unlike gen6_queue_rps_work() from which this function is originally derived, 932 * we must be able to deal with other PM interrupts. This is complicated because 933 * of the way in which we use the masks to defer the RPS work (which for 934 * posterity is necessary because of forcewake). 935 */ 936 static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, 937 u32 pm_iir) 938 { 939 unsigned long flags; 940 941 spin_lock_irqsave(&dev_priv->rps.lock, flags); 942 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 943 if (dev_priv->rps.pm_iir) { 944 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); 945 /* never want to mask useful interrupts. 
(also posting read) */ 946 WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); 947 /* TODO: if queue_work is slow, move it out of the spinlock */ 948 queue_work(dev_priv->wq, &dev_priv->rps.work); 949 } 950 spin_unlock_irqrestore(&dev_priv->rps.lock, flags); 951 952 if (pm_iir & ~GEN6_PM_RPS_EVENTS) { 953 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 954 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 955 956 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 957 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 958 i915_handle_error(dev_priv->dev, false); 959 } 960 } 961 } 962 963 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 964 { 965 struct drm_device *dev = (struct drm_device *) arg; 966 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 967 u32 iir, gt_iir, pm_iir; 968 irqreturn_t ret = IRQ_NONE; 969 unsigned long irqflags; 970 int pipe; 971 u32 pipe_stats[I915_MAX_PIPES]; 972 973 atomic_inc(&dev_priv->irq_received); 974 975 while (true) { 976 iir = I915_READ(VLV_IIR); 977 gt_iir = I915_READ(GTIIR); 978 pm_iir = I915_READ(GEN6_PMIIR); 979 980 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 981 goto out; 982 983 ret = IRQ_HANDLED; 984 985 snb_gt_irq_handler(dev, dev_priv, gt_iir); 986 987 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 988 for_each_pipe(pipe) { 989 int reg = PIPESTAT(pipe); 990 pipe_stats[pipe] = I915_READ(reg); 991 992 /* 993 * Clear the PIPE*STAT regs before the IIR 994 */ 995 if (pipe_stats[pipe] & 0x8000ffff) { 996 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 997 DRM_DEBUG_DRIVER("pipe %c underrun\n", 998 pipe_name(pipe)); 999 I915_WRITE(reg, pipe_stats[pipe]); 1000 } 1001 } 1002 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1003 1004 for_each_pipe(pipe) { 1005 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1006 drm_handle_vblank(dev, pipe); 1007 1008 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 1009 intel_prepare_page_flip(dev, pipe); 1010 intel_finish_page_flip(dev, pipe); 1011 } 1012 } 1013 1014 /* Consume port. 
Then clear IIR or we'll miss events */ 1015 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 1016 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1017 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1018 1019 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1020 hotplug_status); 1021 1022 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 1023 1024 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1025 I915_READ(PORT_HOTPLUG_STAT); 1026 } 1027 1028 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1029 gmbus_irq_handler(dev); 1030 1031 if (pm_iir & GEN6_PM_RPS_EVENTS) 1032 gen6_queue_rps_work(dev_priv, pm_iir); 1033 1034 I915_WRITE(GTIIR, gt_iir); 1035 I915_WRITE(GEN6_PMIIR, pm_iir); 1036 I915_WRITE(VLV_IIR, iir); 1037 } 1038 1039 out: 1040 return ret; 1041 } 1042 1043 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1044 { 1045 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1046 int pipe; 1047 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1048 1049 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1050 1051 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1052 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1053 SDE_AUDIO_POWER_SHIFT); 1054 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1055 port_name(port)); 1056 } 1057 1058 if (pch_iir & SDE_AUX_MASK) 1059 dp_aux_irq_handler(dev); 1060 1061 if (pch_iir & SDE_GMBUS) 1062 gmbus_irq_handler(dev); 1063 1064 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1065 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1066 1067 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1068 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1069 1070 if (pch_iir & SDE_POISON) 1071 DRM_ERROR("PCH poison interrupt\n"); 1072 1073 if (pch_iir & SDE_FDI_MASK) 1074 for_each_pipe(pipe) 1075 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1076 pipe_name(pipe), 1077 I915_READ(FDI_RX_IIR(pipe))); 1078 1079 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1080 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1081 1082 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1083 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1084 1085 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1086 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1087 false)) 1088 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1089 1090 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1091 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1092 false)) 1093 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1094 } 1095 1096 static void ivb_err_int_handler(struct drm_device *dev) 1097 { 1098 struct drm_i915_private *dev_priv = dev->dev_private; 1099 u32 err_int = I915_READ(GEN7_ERR_INT); 1100 1101 if (err_int & ERR_INT_POISON) 1102 DRM_ERROR("Poison interrupt\n"); 1103 1104 if (err_int & ERR_INT_FIFO_UNDERRUN_A) 1105 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1106 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1107 1108 if (err_int & ERR_INT_FIFO_UNDERRUN_B) 1109 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1110 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1111 1112 if (err_int & ERR_INT_FIFO_UNDERRUN_C) 1113 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) 1114 DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); 1115 1116 I915_WRITE(GEN7_ERR_INT, err_int); 1117 } 1118 1119 static void cpt_serr_int_handler(struct drm_device *dev) 1120 { 1121 struct drm_i915_private *dev_priv = dev->dev_private; 1122 u32 serr_int = I915_READ(SERR_INT); 1123 1124 if (serr_int & 
SERR_INT_POISON) 1125 DRM_ERROR("PCH poison interrupt\n"); 1126 1127 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1128 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1129 false)) 1130 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1131 1132 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1133 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1134 false)) 1135 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1136 1137 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1138 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 1139 false)) 1140 DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 1141 1142 I915_WRITE(SERR_INT, serr_int); 1143 } 1144 1145 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1146 { 1147 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1148 int pipe; 1149 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1150 1151 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 1152 1153 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1154 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1155 SDE_AUDIO_POWER_SHIFT_CPT); 1156 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1157 port_name(port)); 1158 } 1159 1160 if (pch_iir & SDE_AUX_MASK_CPT) 1161 dp_aux_irq_handler(dev); 1162 1163 if (pch_iir & SDE_GMBUS_CPT) 1164 gmbus_irq_handler(dev); 1165 1166 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1167 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1168 1169 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1170 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 1171 1172 if (pch_iir & SDE_FDI_MASK_CPT) 1173 for_each_pipe(pipe) 1174 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1175 pipe_name(pipe), 1176 I915_READ(FDI_RX_IIR(pipe))); 1177 1178 if (pch_iir & SDE_ERROR_CPT) 1179 cpt_serr_int_handler(dev); 1180 } 1181 1182 static irqreturn_t ivybridge_irq_handler(int irq, void *arg) 1183 { 1184 struct drm_device *dev = (struct drm_device *) arg; 1185 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1186 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; 1187 irqreturn_t ret = IRQ_NONE; 1188 int i; 1189 1190 atomic_inc(&dev_priv->irq_received); 1191 1192 /* We get interrupts on unclaimed registers, so check for this before we 1193 * do any I915_{READ,WRITE}. */ 1194 if (IS_HASWELL(dev) && 1195 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 1196 DRM_ERROR("Unclaimed register before interrupt\n"); 1197 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 1198 } 1199 1200 /* disable master interrupt before clearing iir */ 1201 de_ier = I915_READ(DEIER); 1202 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 1203 1204 /* Disable south interrupts. We'll only write to SDEIIR once, so further 1205 * interrupts will will be stored on its back queue, and then we'll be 1206 * able to process them after we restore SDEIER (as soon as we restore 1207 * it, we'll get an interrupt if SDEIIR still has something to process 1208 * due to its back queue). */ 1209 if (!HAS_PCH_NOP(dev)) { 1210 sde_ier = I915_READ(SDEIER); 1211 I915_WRITE(SDEIER, 0); 1212 POSTING_READ(SDEIER); 1213 } 1214 1215 /* On Haswell, also mask ERR_INT because we don't want to risk 1216 * generating "unclaimed register" interrupts from inside the interrupt 1217 * handler. 
*/ 1218 if (IS_HASWELL(dev)) { 1219 spin_lock(&dev_priv->irq_lock); 1220 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 1221 spin_unlock(&dev_priv->irq_lock); 1222 } 1223 1224 gt_iir = I915_READ(GTIIR); 1225 if (gt_iir) { 1226 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1227 I915_WRITE(GTIIR, gt_iir); 1228 ret = IRQ_HANDLED; 1229 } 1230 1231 de_iir = I915_READ(DEIIR); 1232 if (de_iir) { 1233 if (de_iir & DE_ERR_INT_IVB) 1234 ivb_err_int_handler(dev); 1235 1236 if (de_iir & DE_AUX_CHANNEL_A_IVB) 1237 dp_aux_irq_handler(dev); 1238 1239 if (de_iir & DE_GSE_IVB) 1240 intel_opregion_asle_intr(dev); 1241 1242 for (i = 0; i < 3; i++) { 1243 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 1244 drm_handle_vblank(dev, i); 1245 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 1246 intel_prepare_page_flip(dev, i); 1247 intel_finish_page_flip_plane(dev, i); 1248 } 1249 } 1250 1251 /* check event from PCH */ 1252 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 1253 u32 pch_iir = I915_READ(SDEIIR); 1254 1255 cpt_irq_handler(dev, pch_iir); 1256 1257 /* clear PCH hotplug event before clear CPU irq */ 1258 I915_WRITE(SDEIIR, pch_iir); 1259 } 1260 1261 I915_WRITE(DEIIR, de_iir); 1262 ret = IRQ_HANDLED; 1263 } 1264 1265 pm_iir = I915_READ(GEN6_PMIIR); 1266 if (pm_iir) { 1267 if (IS_HASWELL(dev)) 1268 hsw_pm_irq_handler(dev_priv, pm_iir); 1269 else if (pm_iir & GEN6_PM_RPS_EVENTS) 1270 gen6_queue_rps_work(dev_priv, pm_iir); 1271 I915_WRITE(GEN6_PMIIR, pm_iir); 1272 ret = IRQ_HANDLED; 1273 } 1274 1275 if (IS_HASWELL(dev)) { 1276 spin_lock(&dev_priv->irq_lock); 1277 if (ivb_can_enable_err_int(dev)) 1278 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 1279 spin_unlock(&dev_priv->irq_lock); 1280 } 1281 1282 I915_WRITE(DEIER, de_ier); 1283 POSTING_READ(DEIER); 1284 if (!HAS_PCH_NOP(dev)) { 1285 I915_WRITE(SDEIER, sde_ier); 1286 POSTING_READ(SDEIER); 1287 } 1288 1289 return ret; 1290 } 1291 1292 static void ilk_gt_irq_handler(struct drm_device *dev, 1293 struct drm_i915_private *dev_priv, 1294 u32 gt_iir) 1295 { 1296 if (gt_iir & 1297 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1298 notify_ring(dev, &dev_priv->ring[RCS]); 1299 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1300 notify_ring(dev, &dev_priv->ring[VCS]); 1301 } 1302 1303 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1304 { 1305 struct drm_device *dev = (struct drm_device *) arg; 1306 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1307 int ret = IRQ_NONE; 1308 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; 1309 1310 atomic_inc(&dev_priv->irq_received); 1311 1312 /* disable master interrupt before clearing iir */ 1313 de_ier = I915_READ(DEIER); 1314 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 1315 POSTING_READ(DEIER); 1316 1317 /* Disable south interrupts. We'll only write to SDEIIR once, so further 1318 * interrupts will will be stored on its back queue, and then we'll be 1319 * able to process them after we restore SDEIER (as soon as we restore 1320 * it, we'll get an interrupt if SDEIIR still has something to process 1321 * due to its back queue). 
*/ 1322 sde_ier = I915_READ(SDEIER); 1323 I915_WRITE(SDEIER, 0); 1324 POSTING_READ(SDEIER); 1325 1326 de_iir = I915_READ(DEIIR); 1327 gt_iir = I915_READ(GTIIR); 1328 pm_iir = I915_READ(GEN6_PMIIR); 1329 1330 if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) 1331 goto done; 1332 1333 ret = IRQ_HANDLED; 1334 1335 if (IS_GEN5(dev)) 1336 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 1337 else 1338 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1339 1340 if (de_iir & DE_AUX_CHANNEL_A) 1341 dp_aux_irq_handler(dev); 1342 1343 if (de_iir & DE_GSE) 1344 intel_opregion_asle_intr(dev); 1345 1346 if (de_iir & DE_PIPEA_VBLANK) 1347 drm_handle_vblank(dev, 0); 1348 1349 if (de_iir & DE_PIPEB_VBLANK) 1350 drm_handle_vblank(dev, 1); 1351 1352 if (de_iir & DE_POISON) 1353 DRM_ERROR("Poison interrupt\n"); 1354 1355 if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1356 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1357 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1358 1359 if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 1360 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1361 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1362 1363 if (de_iir & DE_PLANEA_FLIP_DONE) { 1364 intel_prepare_page_flip(dev, 0); 1365 intel_finish_page_flip_plane(dev, 0); 1366 } 1367 1368 if (de_iir & DE_PLANEB_FLIP_DONE) { 1369 intel_prepare_page_flip(dev, 1); 1370 intel_finish_page_flip_plane(dev, 1); 1371 } 1372 1373 /* check event from PCH */ 1374 if (de_iir & DE_PCH_EVENT) { 1375 u32 pch_iir = I915_READ(SDEIIR); 1376 1377 if (HAS_PCH_CPT(dev)) 1378 cpt_irq_handler(dev, pch_iir); 1379 else 1380 ibx_irq_handler(dev, pch_iir); 1381 1382 /* should clear PCH hotplug event before clear CPU irq */ 1383 I915_WRITE(SDEIIR, pch_iir); 1384 } 1385 1386 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1387 ironlake_handle_rps_change(dev); 1388 1389 if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) 1390 gen6_queue_rps_work(dev_priv, pm_iir); 1391 1392 I915_WRITE(GTIIR, gt_iir); 1393 I915_WRITE(DEIIR, de_iir); 1394 I915_WRITE(GEN6_PMIIR, pm_iir); 1395 1396 done: 1397 I915_WRITE(DEIER, de_ier); 1398 POSTING_READ(DEIER); 1399 I915_WRITE(SDEIER, sde_ier); 1400 POSTING_READ(SDEIER); 1401 1402 return ret; 1403 } 1404 1405 /** 1406 * i915_error_work_func - do process context error handling work 1407 * @work: work struct 1408 * 1409 * Fire an error uevent so userspace can see that a hang or error 1410 * was detected. 1411 */ 1412 static void i915_error_work_func(struct work_struct *work) 1413 { 1414 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 1415 work); 1416 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 1417 gpu_error); 1418 struct drm_device *dev = dev_priv->dev; 1419 struct intel_ring_buffer *ring; 1420 char *error_event[] = { "ERROR=1", NULL }; 1421 char *reset_event[] = { "RESET=1", NULL }; 1422 char *reset_done_event[] = { "ERROR=0", NULL }; 1423 int i, ret; 1424 1425 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1426 1427 /* 1428 * Note that there's only one work item which does gpu resets, so we 1429 * need not worry about concurrent gpu resets potentially incrementing 1430 * error->reset_counter twice. We only need to take care of another 1431 * racing irq/hangcheck declaring the gpu dead for a second time. 
A 1432 * quick check for that is good enough: schedule_work ensures the 1433 * correct ordering between hang detection and this work item, and since 1434 * the reset in-progress bit is only ever set by code outside of this 1435 * work we don't need to worry about any other races. 1436 */ 1437 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 1438 DRM_DEBUG_DRIVER("resetting chip\n"); 1439 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 1440 reset_event); 1441 1442 ret = i915_reset(dev); 1443 1444 if (ret == 0) { 1445 /* 1446 * After all the gem state is reset, increment the reset 1447 * counter and wake up everyone waiting for the reset to 1448 * complete. 1449 * 1450 * Since unlock operations are a one-sided barrier only, 1451 * we need to insert a barrier here to order any seqno 1452 * updates before 1453 * the counter increment. 1454 */ 1455 smp_mb__before_atomic_inc(); 1456 atomic_inc(&dev_priv->gpu_error.reset_counter); 1457 1458 kobject_uevent_env(&dev->primary->kdev.kobj, 1459 KOBJ_CHANGE, reset_done_event); 1460 } else { 1461 atomic_set(&error->reset_counter, I915_WEDGED); 1462 } 1463 1464 for_each_ring(ring, dev_priv, i) 1465 wake_up_all(&ring->irq_queue); 1466 1467 intel_display_handle_reset(dev); 1468 1469 wake_up_all(&dev_priv->gpu_error.reset_queue); 1470 } 1471 } 1472 1473 /* NB: please notice the memset */ 1474 static void i915_get_extra_instdone(struct drm_device *dev, 1475 uint32_t *instdone) 1476 { 1477 struct drm_i915_private *dev_priv = dev->dev_private; 1478 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 1479 1480 switch(INTEL_INFO(dev)->gen) { 1481 case 2: 1482 case 3: 1483 instdone[0] = I915_READ(INSTDONE); 1484 break; 1485 case 4: 1486 case 5: 1487 case 6: 1488 instdone[0] = I915_READ(INSTDONE_I965); 1489 instdone[1] = I915_READ(INSTDONE1); 1490 break; 1491 default: 1492 WARN_ONCE(1, "Unsupported platform\n"); 1493 case 7: 1494 instdone[0] = I915_READ(GEN7_INSTDONE_1); 1495 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1496 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1497 instdone[3] = I915_READ(GEN7_ROW_INSTDONE); 1498 break; 1499 } 1500 } 1501 1502 #ifdef CONFIG_DEBUG_FS 1503 static struct drm_i915_error_object * 1504 i915_error_object_create_sized(struct drm_i915_private *dev_priv, 1505 struct drm_i915_gem_object *src, 1506 const int num_pages) 1507 { 1508 struct drm_i915_error_object *dst; 1509 int i; 1510 u32 reloc_offset; 1511 1512 if (src == NULL || src->pages == NULL) 1513 return NULL; 1514 1515 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); 1516 if (dst == NULL) 1517 return NULL; 1518 1519 reloc_offset = src->gtt_offset; 1520 for (i = 0; i < num_pages; i++) { 1521 unsigned long flags; 1522 void *d; 1523 1524 d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 1525 if (d == NULL) 1526 goto unwind; 1527 1528 local_irq_save(flags); 1529 if (reloc_offset < dev_priv->gtt.mappable_end && 1530 src->has_global_gtt_mapping) { 1531 void __iomem *s; 1532 1533 /* Simply ignore tiling or any overlapping fence. 1534 * It's part of the error state, and this hopefully 1535 * captures what the GPU read. 
1536 */ 1537 1538 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 1539 reloc_offset); 1540 memcpy_fromio(d, s, PAGE_SIZE); 1541 io_mapping_unmap_atomic(s); 1542 } else if (src->stolen) { 1543 unsigned long offset; 1544 1545 offset = dev_priv->mm.stolen_base; 1546 offset += src->stolen->start; 1547 offset += i << PAGE_SHIFT; 1548 1549 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); 1550 } else { 1551 struct page *page; 1552 void *s; 1553 1554 page = i915_gem_object_get_page(src, i); 1555 1556 drm_clflush_pages(&page, 1); 1557 1558 s = kmap_atomic(page); 1559 memcpy(d, s, PAGE_SIZE); 1560 kunmap_atomic(s); 1561 1562 drm_clflush_pages(&page, 1); 1563 } 1564 local_irq_restore(flags); 1565 1566 dst->pages[i] = d; 1567 1568 reloc_offset += PAGE_SIZE; 1569 } 1570 dst->page_count = num_pages; 1571 dst->gtt_offset = src->gtt_offset; 1572 1573 return dst; 1574 1575 unwind: 1576 while (i--) 1577 kfree(dst->pages[i]); 1578 kfree(dst); 1579 return NULL; 1580 } 1581 #define i915_error_object_create(dev_priv, src) \ 1582 i915_error_object_create_sized((dev_priv), (src), \ 1583 (src)->base.size>>PAGE_SHIFT) 1584 1585 static void 1586 i915_error_object_free(struct drm_i915_error_object *obj) 1587 { 1588 int page; 1589 1590 if (obj == NULL) 1591 return; 1592 1593 for (page = 0; page < obj->page_count; page++) 1594 kfree(obj->pages[page]); 1595 1596 kfree(obj); 1597 } 1598 1599 void 1600 i915_error_state_free(struct kref *error_ref) 1601 { 1602 struct drm_i915_error_state *error = container_of(error_ref, 1603 typeof(*error), ref); 1604 int i; 1605 1606 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 1607 i915_error_object_free(error->ring[i].batchbuffer); 1608 i915_error_object_free(error->ring[i].ringbuffer); 1609 i915_error_object_free(error->ring[i].ctx); 1610 kfree(error->ring[i].requests); 1611 } 1612 1613 kfree(error->active_bo); 1614 kfree(error->overlay); 1615 kfree(error->display); 1616 kfree(error); 1617 } 1618 static void capture_bo(struct drm_i915_error_buffer *err, 1619 struct drm_i915_gem_object *obj) 1620 { 1621 err->size = obj->base.size; 1622 err->name = obj->base.name; 1623 err->rseqno = obj->last_read_seqno; 1624 err->wseqno = obj->last_write_seqno; 1625 err->gtt_offset = obj->gtt_offset; 1626 err->read_domains = obj->base.read_domains; 1627 err->write_domain = obj->base.write_domain; 1628 err->fence_reg = obj->fence_reg; 1629 err->pinned = 0; 1630 if (obj->pin_count > 0) 1631 err->pinned = 1; 1632 if (obj->user_pin_count > 0) 1633 err->pinned = -1; 1634 err->tiling = obj->tiling_mode; 1635 err->dirty = obj->dirty; 1636 err->purgeable = obj->madv != I915_MADV_WILLNEED; 1637 err->ring = obj->ring ? 
obj->ring->id : -1; 1638 err->cache_level = obj->cache_level; 1639 } 1640 1641 static u32 capture_active_bo(struct drm_i915_error_buffer *err, 1642 int count, struct list_head *head) 1643 { 1644 struct drm_i915_gem_object *obj; 1645 int i = 0; 1646 1647 list_for_each_entry(obj, head, mm_list) { 1648 capture_bo(err++, obj); 1649 if (++i == count) 1650 break; 1651 } 1652 1653 return i; 1654 } 1655 1656 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 1657 int count, struct list_head *head) 1658 { 1659 struct drm_i915_gem_object *obj; 1660 int i = 0; 1661 1662 list_for_each_entry(obj, head, global_list) { 1663 if (obj->pin_count == 0) 1664 continue; 1665 1666 capture_bo(err++, obj); 1667 if (++i == count) 1668 break; 1669 } 1670 1671 return i; 1672 } 1673 1674 static void i915_gem_record_fences(struct drm_device *dev, 1675 struct drm_i915_error_state *error) 1676 { 1677 struct drm_i915_private *dev_priv = dev->dev_private; 1678 int i; 1679 1680 /* Fences */ 1681 switch (INTEL_INFO(dev)->gen) { 1682 case 7: 1683 case 6: 1684 for (i = 0; i < dev_priv->num_fence_regs; i++) 1685 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 1686 break; 1687 case 5: 1688 case 4: 1689 for (i = 0; i < 16; i++) 1690 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 1691 break; 1692 case 3: 1693 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 1694 for (i = 0; i < 8; i++) 1695 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 1696 case 2: 1697 for (i = 0; i < 8; i++) 1698 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 1699 break; 1700 1701 default: 1702 BUG(); 1703 } 1704 } 1705 1706 static struct drm_i915_error_object * 1707 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 1708 struct intel_ring_buffer *ring) 1709 { 1710 struct drm_i915_gem_object *obj; 1711 u32 seqno; 1712 1713 if (!ring->get_seqno) 1714 return NULL; 1715 1716 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { 1717 u32 acthd = I915_READ(ACTHD); 1718 1719 if (WARN_ON(ring->id != RCS)) 1720 return NULL; 1721 1722 obj = ring->private; 1723 if (acthd >= obj->gtt_offset && 1724 acthd < obj->gtt_offset + obj->base.size) 1725 return i915_error_object_create(dev_priv, obj); 1726 } 1727 1728 seqno = ring->get_seqno(ring, false); 1729 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1730 if (obj->ring != ring) 1731 continue; 1732 1733 if (i915_seqno_passed(seqno, obj->last_read_seqno)) 1734 continue; 1735 1736 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 1737 continue; 1738 1739 /* We need to copy these to an anonymous buffer as the simplest 1740 * method to avoid being overwritten by userspace. 
1741 */ 1742 return i915_error_object_create(dev_priv, obj); 1743 } 1744 1745 return NULL; 1746 } 1747 1748 static void i915_record_ring_state(struct drm_device *dev, 1749 struct drm_i915_error_state *error, 1750 struct intel_ring_buffer *ring) 1751 { 1752 struct drm_i915_private *dev_priv = dev->dev_private; 1753 1754 if (INTEL_INFO(dev)->gen >= 6) { 1755 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 1756 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 1757 error->semaphore_mboxes[ring->id][0] 1758 = I915_READ(RING_SYNC_0(ring->mmio_base)); 1759 error->semaphore_mboxes[ring->id][1] 1760 = I915_READ(RING_SYNC_1(ring->mmio_base)); 1761 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 1762 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 1763 } 1764 1765 if (INTEL_INFO(dev)->gen >= 4) { 1766 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 1767 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 1768 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1769 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1770 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 1771 if (ring->id == RCS) 1772 error->bbaddr = I915_READ64(BB_ADDR); 1773 } else { 1774 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 1775 error->ipeir[ring->id] = I915_READ(IPEIR); 1776 error->ipehr[ring->id] = I915_READ(IPEHR); 1777 error->instdone[ring->id] = I915_READ(INSTDONE); 1778 } 1779 1780 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 1781 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1782 error->seqno[ring->id] = ring->get_seqno(ring, false); 1783 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1784 error->head[ring->id] = I915_READ_HEAD(ring); 1785 error->tail[ring->id] = I915_READ_TAIL(ring); 1786 error->ctl[ring->id] = I915_READ_CTL(ring); 1787 1788 error->cpu_ring_head[ring->id] = ring->head; 1789 error->cpu_ring_tail[ring->id] = ring->tail; 1790 } 1791 1792 1793 static void i915_gem_record_active_context(struct intel_ring_buffer *ring, 1794 struct drm_i915_error_state *error, 1795 struct drm_i915_error_ring *ering) 1796 { 1797 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1798 struct drm_i915_gem_object *obj; 1799 1800 /* Currently render ring is the only HW context user */ 1801 if (ring->id != RCS || !error->ccid) 1802 return; 1803 1804 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 1805 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { 1806 ering->ctx = i915_error_object_create_sized(dev_priv, 1807 obj, 1); 1808 } 1809 } 1810 } 1811 1812 static void i915_gem_record_rings(struct drm_device *dev, 1813 struct drm_i915_error_state *error) 1814 { 1815 struct drm_i915_private *dev_priv = dev->dev_private; 1816 struct intel_ring_buffer *ring; 1817 struct drm_i915_gem_request *request; 1818 int i, count; 1819 1820 for_each_ring(ring, dev_priv, i) { 1821 i915_record_ring_state(dev, error, ring); 1822 1823 error->ring[i].batchbuffer = 1824 i915_error_first_batchbuffer(dev_priv, ring); 1825 1826 error->ring[i].ringbuffer = 1827 i915_error_object_create(dev_priv, ring->obj); 1828 1829 1830 i915_gem_record_active_context(ring, error, &error->ring[i]); 1831 1832 count = 0; 1833 list_for_each_entry(request, &ring->request_list, list) 1834 count++; 1835 1836 error->ring[i].num_requests = count; 1837 error->ring[i].requests = 1838 kmalloc(count*sizeof(struct drm_i915_error_request), 1839 GFP_ATOMIC); 1840 if 
(error->ring[i].requests == NULL) { 1841 error->ring[i].num_requests = 0; 1842 continue; 1843 } 1844 1845 count = 0; 1846 list_for_each_entry(request, &ring->request_list, list) { 1847 struct drm_i915_error_request *erq; 1848 1849 erq = &error->ring[i].requests[count++]; 1850 erq->seqno = request->seqno; 1851 erq->jiffies = request->emitted_jiffies; 1852 erq->tail = request->tail; 1853 } 1854 } 1855 } 1856 1857 /** 1858 * i915_capture_error_state - capture an error record for later analysis 1859 * @dev: drm device 1860 * 1861 * Should be called when an error is detected (either a hang or an error 1862 * interrupt) to capture error state from the time of the error. Fills 1863 * out a structure which becomes available in debugfs for user level tools 1864 * to pick up. 1865 */ 1866 static void i915_capture_error_state(struct drm_device *dev) 1867 { 1868 struct drm_i915_private *dev_priv = dev->dev_private; 1869 struct drm_i915_gem_object *obj; 1870 struct drm_i915_error_state *error; 1871 unsigned long flags; 1872 int i, pipe; 1873 1874 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1875 error = dev_priv->gpu_error.first_error; 1876 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1877 if (error) 1878 return; 1879 1880 /* Account for pipe specific data like PIPE*STAT */ 1881 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1882 if (!error) { 1883 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1884 return; 1885 } 1886 1887 DRM_INFO("capturing error event; look for more information in " 1888 "/sys/kernel/debug/dri/%d/i915_error_state\n", 1889 dev->primary->index); 1890 1891 kref_init(&error->ref); 1892 error->eir = I915_READ(EIR); 1893 error->pgtbl_er = I915_READ(PGTBL_ER); 1894 if (HAS_HW_CONTEXTS(dev)) 1895 error->ccid = I915_READ(CCID); 1896 1897 if (HAS_PCH_SPLIT(dev)) 1898 error->ier = I915_READ(DEIER) | I915_READ(GTIER); 1899 else if (IS_VALLEYVIEW(dev)) 1900 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); 1901 else if (IS_GEN2(dev)) 1902 error->ier = I915_READ16(IER); 1903 else 1904 error->ier = I915_READ(IER); 1905 1906 if (INTEL_INFO(dev)->gen >= 6) 1907 error->derrmr = I915_READ(DERRMR); 1908 1909 if (IS_VALLEYVIEW(dev)) 1910 error->forcewake = I915_READ(FORCEWAKE_VLV); 1911 else if (INTEL_INFO(dev)->gen >= 7) 1912 error->forcewake = I915_READ(FORCEWAKE_MT); 1913 else if (INTEL_INFO(dev)->gen == 6) 1914 error->forcewake = I915_READ(FORCEWAKE); 1915 1916 if (!HAS_PCH_SPLIT(dev)) 1917 for_each_pipe(pipe) 1918 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1919 1920 if (INTEL_INFO(dev)->gen >= 6) { 1921 error->error = I915_READ(ERROR_GEN6); 1922 error->done_reg = I915_READ(DONE_REG); 1923 } 1924 1925 if (INTEL_INFO(dev)->gen == 7) 1926 error->err_int = I915_READ(GEN7_ERR_INT); 1927 1928 i915_get_extra_instdone(dev, error->extra_instdone); 1929 1930 i915_gem_record_fences(dev, error); 1931 i915_gem_record_rings(dev, error); 1932 1933 /* Record buffers on the active and pinned lists. 
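 * This is done in two passes: first walk the lists just to count the
 * entries, then allocate a single array (active entries followed by the
 * pinned ones) and fill it via capture_active_bo()/capture_pinned_bo().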
*/ 1934 error->active_bo = NULL; 1935 error->pinned_bo = NULL; 1936 1937 i = 0; 1938 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1939 i++; 1940 error->active_bo_count = i; 1941 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 1942 if (obj->pin_count) 1943 i++; 1944 error->pinned_bo_count = i - error->active_bo_count; 1945 1946 error->active_bo = NULL; 1947 error->pinned_bo = NULL; 1948 if (i) { 1949 error->active_bo = kmalloc(sizeof(*error->active_bo)*i, 1950 GFP_ATOMIC); 1951 if (error->active_bo) 1952 error->pinned_bo = 1953 error->active_bo + error->active_bo_count; 1954 } 1955 1956 if (error->active_bo) 1957 error->active_bo_count = 1958 capture_active_bo(error->active_bo, 1959 error->active_bo_count, 1960 &dev_priv->mm.active_list); 1961 1962 if (error->pinned_bo) 1963 error->pinned_bo_count = 1964 capture_pinned_bo(error->pinned_bo, 1965 error->pinned_bo_count, 1966 &dev_priv->mm.bound_list); 1967 1968 do_gettimeofday(&error->time); 1969 1970 error->overlay = intel_overlay_capture_error_state(dev); 1971 error->display = intel_display_capture_error_state(dev); 1972 1973 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1974 if (dev_priv->gpu_error.first_error == NULL) { 1975 dev_priv->gpu_error.first_error = error; 1976 error = NULL; 1977 } 1978 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1979 1980 if (error) 1981 i915_error_state_free(&error->ref); 1982 } 1983 1984 void i915_destroy_error_state(struct drm_device *dev) 1985 { 1986 struct drm_i915_private *dev_priv = dev->dev_private; 1987 struct drm_i915_error_state *error; 1988 unsigned long flags; 1989 1990 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1991 error = dev_priv->gpu_error.first_error; 1992 dev_priv->gpu_error.first_error = NULL; 1993 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1994 1995 if (error) 1996 kref_put(&error->ref, i915_error_state_free); 1997 } 1998 #else 1999 #define i915_capture_error_state(x) 2000 #endif 2001 2002 static void i915_report_and_clear_eir(struct drm_device *dev) 2003 { 2004 struct drm_i915_private *dev_priv = dev->dev_private; 2005 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2006 u32 eir = I915_READ(EIR); 2007 int pipe, i; 2008 2009 if (!eir) 2010 return; 2011 2012 pr_err("render error detected, EIR: 0x%08x\n", eir); 2013 2014 i915_get_extra_instdone(dev, instdone); 2015 2016 if (IS_G4X(dev)) { 2017 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2018 u32 ipeir = I915_READ(IPEIR_I965); 2019 2020 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2021 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2022 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2023 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2024 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2025 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2026 I915_WRITE(IPEIR_I965, ipeir); 2027 POSTING_READ(IPEIR_I965); 2028 } 2029 if (eir & GM45_ERROR_PAGE_TABLE) { 2030 u32 pgtbl_err = I915_READ(PGTBL_ER); 2031 pr_err("page table error\n"); 2032 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2033 I915_WRITE(PGTBL_ER, pgtbl_err); 2034 POSTING_READ(PGTBL_ER); 2035 } 2036 } 2037 2038 if (!IS_GEN2(dev)) { 2039 if (eir & I915_ERROR_PAGE_TABLE) { 2040 u32 pgtbl_err = I915_READ(PGTBL_ER); 2041 pr_err("page table error\n"); 2042 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2043 I915_WRITE(PGTBL_ER, pgtbl_err); 2044 POSTING_READ(PGTBL_ER); 2045 } 2046 } 2047 2048 if (eir & I915_ERROR_MEMORY_REFRESH) { 2049 pr_err("memory refresh error:\n"); 2050 for_each_pipe(pipe) 2051 
pr_err("pipe %c stat: 0x%08x\n", 2052 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2053 /* pipestat has already been acked */ 2054 } 2055 if (eir & I915_ERROR_INSTRUCTION) { 2056 pr_err("instruction error\n"); 2057 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2058 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2059 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2060 if (INTEL_INFO(dev)->gen < 4) { 2061 u32 ipeir = I915_READ(IPEIR); 2062 2063 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2064 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2065 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2066 I915_WRITE(IPEIR, ipeir); 2067 POSTING_READ(IPEIR); 2068 } else { 2069 u32 ipeir = I915_READ(IPEIR_I965); 2070 2071 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2072 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2073 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2074 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2075 I915_WRITE(IPEIR_I965, ipeir); 2076 POSTING_READ(IPEIR_I965); 2077 } 2078 } 2079 2080 I915_WRITE(EIR, eir); 2081 POSTING_READ(EIR); 2082 eir = I915_READ(EIR); 2083 if (eir) { 2084 /* 2085 * some errors might have become stuck, 2086 * mask them. 2087 */ 2088 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2089 I915_WRITE(EMR, I915_READ(EMR) | eir); 2090 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2091 } 2092 } 2093 2094 /** 2095 * i915_handle_error - handle an error interrupt 2096 * @dev: drm device 2097 * 2098 * Do some basic checking of register state at error interrupt time and 2099 * dump it to the syslog. Also call i915_capture_error_state() to make 2100 * sure we get a record and make it available in debugfs. Fire a uevent 2101 * so userspace knows something bad happened (should trigger collection 2102 * of a ring dump etc.). 2103 */ 2104 void i915_handle_error(struct drm_device *dev, bool wedged) 2105 { 2106 struct drm_i915_private *dev_priv = dev->dev_private; 2107 struct intel_ring_buffer *ring; 2108 int i; 2109 2110 i915_capture_error_state(dev); 2111 i915_report_and_clear_eir(dev); 2112 2113 if (wedged) { 2114 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2115 &dev_priv->gpu_error.reset_counter); 2116 2117 /* 2118 * Wake up waiting processes so that the reset work item 2119 * doesn't deadlock trying to grab various locks. 2120 */ 2121 for_each_ring(ring, dev_priv, i) 2122 wake_up_all(&ring->irq_queue); 2123 } 2124 2125 queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 2126 } 2127 2128 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2129 { 2130 drm_i915_private_t *dev_priv = dev->dev_private; 2131 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2132 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2133 struct drm_i915_gem_object *obj; 2134 struct intel_unpin_work *work; 2135 unsigned long flags; 2136 bool stall_detected; 2137 2138 /* Ignore early vblank irqs */ 2139 if (intel_crtc == NULL) 2140 return; 2141 2142 spin_lock_irqsave(&dev->event_lock, flags); 2143 work = intel_crtc->unpin_work; 2144 2145 if (work == NULL || 2146 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2147 !work->enable_stall_check) { 2148 /* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 2149 spin_unlock_irqrestore(&dev->event_lock, flags); 2150 return; 2151 } 2152 2153 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2154 obj = work->pending_flip_obj; 2155 if (INTEL_INFO(dev)->gen >= 4) { 2156 int dspsurf = DSPSURF(intel_crtc->plane); 2157 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2158 obj->gtt_offset; 2159 } else { 2160 int dspaddr = DSPADDR(intel_crtc->plane); 2161 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 2162 crtc->y * crtc->fb->pitches[0] + 2163 crtc->x * crtc->fb->bits_per_pixel/8); 2164 } 2165 2166 spin_unlock_irqrestore(&dev->event_lock, flags); 2167 2168 if (stall_detected) { 2169 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2170 intel_prepare_page_flip(dev, intel_crtc->plane); 2171 } 2172 } 2173 2174 /* Called from drm generic code, passed 'crtc' which 2175 * we use as a pipe index 2176 */ 2177 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2178 { 2179 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2180 unsigned long irqflags; 2181 2182 if (!i915_pipe_enabled(dev, pipe)) 2183 return -EINVAL; 2184 2185 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2186 if (INTEL_INFO(dev)->gen >= 4) 2187 i915_enable_pipestat(dev_priv, pipe, 2188 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2189 else 2190 i915_enable_pipestat(dev_priv, pipe, 2191 PIPE_VBLANK_INTERRUPT_ENABLE); 2192 2193 /* maintain vblank delivery even in deep C-states */ 2194 if (dev_priv->info->gen == 3) 2195 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 2196 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2197 2198 return 0; 2199 } 2200 2201 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2202 { 2203 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2204 unsigned long irqflags; 2205 2206 if (!i915_pipe_enabled(dev, pipe)) 2207 return -EINVAL; 2208 2209 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2210 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
2211 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 2212 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2213 2214 return 0; 2215 } 2216 2217 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) 2218 { 2219 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2220 unsigned long irqflags; 2221 2222 if (!i915_pipe_enabled(dev, pipe)) 2223 return -EINVAL; 2224 2225 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2226 ironlake_enable_display_irq(dev_priv, 2227 DE_PIPEA_VBLANK_IVB << (5 * pipe)); 2228 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2229 2230 return 0; 2231 } 2232 2233 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2234 { 2235 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2236 unsigned long irqflags; 2237 u32 imr; 2238 2239 if (!i915_pipe_enabled(dev, pipe)) 2240 return -EINVAL; 2241 2242 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2243 imr = I915_READ(VLV_IMR); 2244 if (pipe == 0) 2245 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2246 else 2247 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2248 I915_WRITE(VLV_IMR, imr); 2249 i915_enable_pipestat(dev_priv, pipe, 2250 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2251 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2252 2253 return 0; 2254 } 2255 2256 /* Called from drm generic code, passed 'crtc' which 2257 * we use as a pipe index 2258 */ 2259 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2260 { 2261 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2262 unsigned long irqflags; 2263 2264 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2265 if (dev_priv->info->gen == 3) 2266 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 2267 2268 i915_disable_pipestat(dev_priv, pipe, 2269 PIPE_VBLANK_INTERRUPT_ENABLE | 2270 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2271 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2272 } 2273 2274 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2275 { 2276 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2277 unsigned long irqflags; 2278 2279 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2280 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
2281 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 2282 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2283 } 2284 2285 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) 2286 { 2287 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2288 unsigned long irqflags; 2289 2290 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2291 ironlake_disable_display_irq(dev_priv, 2292 DE_PIPEA_VBLANK_IVB << (pipe * 5)); 2293 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2294 } 2295 2296 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2297 { 2298 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2299 unsigned long irqflags; 2300 u32 imr; 2301 2302 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2303 i915_disable_pipestat(dev_priv, pipe, 2304 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2305 imr = I915_READ(VLV_IMR); 2306 if (pipe == 0) 2307 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2308 else 2309 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2310 I915_WRITE(VLV_IMR, imr); 2311 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2312 } 2313 2314 static u32 2315 ring_last_seqno(struct intel_ring_buffer *ring) 2316 { 2317 return list_entry(ring->request_list.prev, 2318 struct drm_i915_gem_request, list)->seqno; 2319 } 2320 2321 static bool 2322 ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2323 { 2324 return (list_empty(&ring->request_list) || 2325 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2326 } 2327 2328 static struct intel_ring_buffer * 2329 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2330 { 2331 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2332 u32 cmd, ipehr, acthd, acthd_min; 2333 2334 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2335 if ((ipehr & ~(0x3 << 16)) != 2336 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2337 return NULL; 2338 2339 /* ACTHD is likely pointing to the dword after the actual command, 2340 * so scan backwards until we find the MBOX. 
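 * Judging from the reads below, the command stream at that point looks
 * roughly like:
 *
 *	MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER
 *	<semaphore wait value>
 *
 * so once the MBOX dword is found, the dword at acthd + 4 holds the value
 * being waited on, and adding one to it recovers the seqno the signalling
 * ring has to reach.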
2341 */ 2342 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2343 acthd_min = max((int)acthd - 3 * 4, 0); 2344 do { 2345 cmd = ioread32(ring->virtual_start + acthd); 2346 if (cmd == ipehr) 2347 break; 2348 2349 acthd -= 4; 2350 if (acthd < acthd_min) 2351 return NULL; 2352 } while (1); 2353 2354 *seqno = ioread32(ring->virtual_start+acthd+4)+1; 2355 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2356 } 2357 2358 static int semaphore_passed(struct intel_ring_buffer *ring) 2359 { 2360 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2361 struct intel_ring_buffer *signaller; 2362 u32 seqno, ctl; 2363 2364 ring->hangcheck.deadlock = true; 2365 2366 signaller = semaphore_waits_for(ring, &seqno); 2367 if (signaller == NULL || signaller->hangcheck.deadlock) 2368 return -1; 2369 2370 /* cursory check for an unkickable deadlock */ 2371 ctl = I915_READ_CTL(signaller); 2372 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 2373 return -1; 2374 2375 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 2376 } 2377 2378 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2379 { 2380 struct intel_ring_buffer *ring; 2381 int i; 2382 2383 for_each_ring(ring, dev_priv, i) 2384 ring->hangcheck.deadlock = false; 2385 } 2386 2387 static enum intel_ring_hangcheck_action 2388 ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 2389 { 2390 struct drm_device *dev = ring->dev; 2391 struct drm_i915_private *dev_priv = dev->dev_private; 2392 u32 tmp; 2393 2394 if (ring->hangcheck.acthd != acthd) 2395 return active; 2396 2397 if (IS_GEN2(dev)) 2398 return hung; 2399 2400 /* Is the chip hanging on a WAIT_FOR_EVENT? 2401 * If so we can simply poke the RB_WAIT bit 2402 * and break the hang. This should work on 2403 * all but the second generation chipsets. 2404 */ 2405 tmp = I915_READ_CTL(ring); 2406 if (tmp & RING_WAIT) { 2407 DRM_ERROR("Kicking stuck wait on %s\n", 2408 ring->name); 2409 I915_WRITE_CTL(ring, tmp); 2410 return kick; 2411 } 2412 2413 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2414 switch (semaphore_passed(ring)) { 2415 default: 2416 return hung; 2417 case 1: 2418 DRM_ERROR("Kicking stuck semaphore on %s\n", 2419 ring->name); 2420 I915_WRITE_CTL(ring, tmp); 2421 return kick; 2422 case 0: 2423 return wait; 2424 } 2425 } 2426 2427 return hung; 2428 } 2429 2430 /** 2431 * This is called when the chip hasn't reported back with completed 2432 * batchbuffers in a long time. We keep track of per-ring seqno progress and 2433 * if there is no progress, the hangcheck score for that ring is increased. 2434 * Further, acthd is inspected to see if the ring is stuck. In the stuck case 2435 * we kick the ring. If we see no progress on three subsequent calls 2436 * we assume the chip is wedged and try to fix it by resetting the chip.
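 * The per-ring score is driven by the BUSY/KICK/HUNG increments defined
 * below and decays by one on every period in which the seqno does advance;
 * only when a score climbs past FIRE do we report the ring as hung and
 * call i915_handle_error() with wedged set.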
2437 */ 2438 void i915_hangcheck_elapsed(unsigned long data) 2439 { 2440 struct drm_device *dev = (struct drm_device *)data; 2441 drm_i915_private_t *dev_priv = dev->dev_private; 2442 struct intel_ring_buffer *ring; 2443 int i; 2444 int busy_count = 0, rings_hung = 0; 2445 bool stuck[I915_NUM_RINGS] = { 0 }; 2446 #define BUSY 1 2447 #define KICK 5 2448 #define HUNG 20 2449 #define FIRE 30 2450 2451 if (!i915_enable_hangcheck) 2452 return; 2453 2454 for_each_ring(ring, dev_priv, i) { 2455 u32 seqno, acthd; 2456 bool busy = true; 2457 2458 semaphore_clear_deadlocks(dev_priv); 2459 2460 seqno = ring->get_seqno(ring, false); 2461 acthd = intel_ring_get_active_head(ring); 2462 2463 if (ring->hangcheck.seqno == seqno) { 2464 if (ring_idle(ring, seqno)) { 2465 if (waitqueue_active(&ring->irq_queue)) { 2466 /* Issue a wake-up to catch stuck h/w. */ 2467 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2468 ring->name); 2469 wake_up_all(&ring->irq_queue); 2470 ring->hangcheck.score += HUNG; 2471 } else 2472 busy = false; 2473 } else { 2474 int score; 2475 2476 /* We always increment the hangcheck score 2477 * if the ring is busy and still processing 2478 * the same request, so that no single request 2479 * can run indefinitely (such as a chain of 2480 * batches). The only time we do not increment 2481 * the hangcheck score on this ring is when this 2482 * ring is in a legitimate wait for another 2483 * ring. In that case the waiting ring is a 2484 * victim and we want to be sure we catch the 2485 * right culprit. Then every time we do kick 2486 * the ring, add a small increment to the 2487 * score so that we can catch a batch that is 2488 * being repeatedly kicked and so responsible 2489 * for stalling the machine. 2490 */ 2491 ring->hangcheck.action = ring_stuck(ring, 2492 acthd); 2493 2494 switch (ring->hangcheck.action) { 2495 case wait: 2496 score = 0; 2497 break; 2498 case active: 2499 score = BUSY; 2500 break; 2501 case kick: 2502 score = KICK; 2503 break; 2504 case hung: 2505 score = HUNG; 2506 stuck[i] = true; 2507 break; 2508 } 2509 ring->hangcheck.score += score; 2510 } 2511 } else { 2512 /* Gradually reduce the count so that we catch DoS 2513 * attempts across multiple batches. 2514 */ 2515 if (ring->hangcheck.score > 0) 2516 ring->hangcheck.score--; 2517 } 2518 2519 ring->hangcheck.seqno = seqno; 2520 ring->hangcheck.acthd = acthd; 2521 busy_count += busy; 2522 } 2523 2524 for_each_ring(ring, dev_priv, i) { 2525 if (ring->hangcheck.score > FIRE) { 2526 DRM_ERROR("%s on %s\n", 2527 stuck[i] ? "stuck" : "no progress", 2528 ring->name); 2529 rings_hung++; 2530 } 2531 } 2532 2533 if (rings_hung) 2534 return i915_handle_error(dev, true); 2535 2536 if (busy_count) 2537 /* Reset timer in case the chip hangs without another request 2538 * being added */ 2539 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2540 round_jiffies_up(jiffies + 2541 DRM_I915_HANGCHECK_JIFFIES)); 2542 } 2543 2544 static void ibx_irq_preinstall(struct drm_device *dev) 2545 { 2546 struct drm_i915_private *dev_priv = dev->dev_private; 2547 2548 if (HAS_PCH_NOP(dev)) 2549 return; 2550 2551 /* south display irq */ 2552 I915_WRITE(SDEIMR, 0xffffffff); 2553 /* 2554 * SDEIER is also touched by the interrupt handler to work around missed 2555 * PCH interrupts. Hence we can't update it after the interrupt handler 2556 * is enabled - instead we unconditionally enable all PCH interrupt 2557 * sources here, but then only unmask them as needed with SDEIMR.
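 * In other words SDEIER is left fully enabled here and all per-source
 * masking is done through SDEIMR (see ibx_irq_postinstall() and
 * ibx_hpd_irq_setup()).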
2558 */ 2559 I915_WRITE(SDEIER, 0xffffffff); 2560 POSTING_READ(SDEIER); 2561 } 2562 2563 /* drm_dma.h hooks 2564 */ 2565 static void ironlake_irq_preinstall(struct drm_device *dev) 2566 { 2567 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2568 2569 atomic_set(&dev_priv->irq_received, 0); 2570 2571 I915_WRITE(HWSTAM, 0xeffe); 2572 2573 /* XXX hotplug from PCH */ 2574 2575 I915_WRITE(DEIMR, 0xffffffff); 2576 I915_WRITE(DEIER, 0x0); 2577 POSTING_READ(DEIER); 2578 2579 /* and GT */ 2580 I915_WRITE(GTIMR, 0xffffffff); 2581 I915_WRITE(GTIER, 0x0); 2582 POSTING_READ(GTIER); 2583 2584 ibx_irq_preinstall(dev); 2585 } 2586 2587 static void ivybridge_irq_preinstall(struct drm_device *dev) 2588 { 2589 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2590 2591 atomic_set(&dev_priv->irq_received, 0); 2592 2593 I915_WRITE(HWSTAM, 0xeffe); 2594 2595 /* XXX hotplug from PCH */ 2596 2597 I915_WRITE(DEIMR, 0xffffffff); 2598 I915_WRITE(DEIER, 0x0); 2599 POSTING_READ(DEIER); 2600 2601 /* and GT */ 2602 I915_WRITE(GTIMR, 0xffffffff); 2603 I915_WRITE(GTIER, 0x0); 2604 POSTING_READ(GTIER); 2605 2606 /* Power management */ 2607 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2608 I915_WRITE(GEN6_PMIER, 0x0); 2609 POSTING_READ(GEN6_PMIER); 2610 2611 ibx_irq_preinstall(dev); 2612 } 2613 2614 static void valleyview_irq_preinstall(struct drm_device *dev) 2615 { 2616 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2617 int pipe; 2618 2619 atomic_set(&dev_priv->irq_received, 0); 2620 2621 /* VLV magic */ 2622 I915_WRITE(VLV_IMR, 0); 2623 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2624 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2625 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2626 2627 /* and GT */ 2628 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2629 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2630 I915_WRITE(GTIMR, 0xffffffff); 2631 I915_WRITE(GTIER, 0x0); 2632 POSTING_READ(GTIER); 2633 2634 I915_WRITE(DPINVGTT, 0xff); 2635 2636 I915_WRITE(PORT_HOTPLUG_EN, 0); 2637 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2638 for_each_pipe(pipe) 2639 I915_WRITE(PIPESTAT(pipe), 0xffff); 2640 I915_WRITE(VLV_IIR, 0xffffffff); 2641 I915_WRITE(VLV_IMR, 0xffffffff); 2642 I915_WRITE(VLV_IER, 0x0); 2643 POSTING_READ(VLV_IER); 2644 } 2645 2646 static void ibx_hpd_irq_setup(struct drm_device *dev) 2647 { 2648 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2649 struct drm_mode_config *mode_config = &dev->mode_config; 2650 struct intel_encoder *intel_encoder; 2651 u32 mask = ~I915_READ(SDEIMR); 2652 u32 hotplug; 2653 2654 if (HAS_PCH_IBX(dev)) { 2655 mask &= ~SDE_HOTPLUG_MASK; 2656 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2657 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2658 mask |= hpd_ibx[intel_encoder->hpd_pin]; 2659 } else { 2660 mask &= ~SDE_HOTPLUG_MASK_CPT; 2661 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2662 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2663 mask |= hpd_cpt[intel_encoder->hpd_pin]; 2664 } 2665 2666 I915_WRITE(SDEIMR, ~mask); 2667 2668 /* 2669 * Enable digital hotplug on the PCH, and configure the DP short pulse 2670 * duration to 2ms (which is the minimum in the Display Port spec) 2671 * 2672 * This register is the same on all known PCH chips. 
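 * The read-modify-write below clears only the pulse-duration fields
 * before setting the per-port enable bits, so whatever else is already
 * programmed in PCH_PORT_HOTPLUG is left untouched.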
2673 */ 2674 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2675 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 2676 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2677 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2678 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2679 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2680 } 2681 2682 static void ibx_irq_postinstall(struct drm_device *dev) 2683 { 2684 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2685 u32 mask; 2686 2687 if (HAS_PCH_NOP(dev)) 2688 return; 2689 2690 if (HAS_PCH_IBX(dev)) { 2691 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2692 SDE_TRANSA_FIFO_UNDER | SDE_POISON; 2693 } else { 2694 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 2695 2696 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2697 } 2698 2699 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2700 I915_WRITE(SDEIMR, ~mask); 2701 } 2702 2703 static int ironlake_irq_postinstall(struct drm_device *dev) 2704 { 2705 unsigned long irqflags; 2706 2707 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2708 /* enable kind of interrupts always enabled */ 2709 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2710 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2711 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 2712 DE_PIPEA_FIFO_UNDERRUN | DE_POISON; 2713 u32 gt_irqs; 2714 2715 dev_priv->irq_mask = ~display_mask; 2716 2717 /* should always can generate irq */ 2718 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2719 I915_WRITE(DEIMR, dev_priv->irq_mask); 2720 I915_WRITE(DEIER, display_mask | 2721 DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT); 2722 POSTING_READ(DEIER); 2723 2724 dev_priv->gt_irq_mask = ~0; 2725 2726 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2727 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2728 2729 gt_irqs = GT_RENDER_USER_INTERRUPT; 2730 2731 if (IS_GEN6(dev)) 2732 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 2733 else 2734 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2735 ILK_BSD_USER_INTERRUPT; 2736 2737 I915_WRITE(GTIER, gt_irqs); 2738 POSTING_READ(GTIER); 2739 2740 ibx_irq_postinstall(dev); 2741 2742 if (IS_IRONLAKE_M(dev)) { 2743 /* Enable PCU event interrupts 2744 * 2745 * spinlocking not required here for correctness since interrupt 2746 * setup is guaranteed to run in single-threaded context. But we 2747 * need it to make the assert_spin_locked happy. 
*/ 2748 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2749 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2750 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2751 } 2752 2753 return 0; 2754 } 2755 2756 static int ivybridge_irq_postinstall(struct drm_device *dev) 2757 { 2758 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2759 /* enable kind of interrupts always enabled */ 2760 u32 display_mask = 2761 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 2762 DE_PLANEC_FLIP_DONE_IVB | 2763 DE_PLANEB_FLIP_DONE_IVB | 2764 DE_PLANEA_FLIP_DONE_IVB | 2765 DE_AUX_CHANNEL_A_IVB | 2766 DE_ERR_INT_IVB; 2767 u32 pm_irqs = GEN6_PM_RPS_EVENTS; 2768 u32 gt_irqs; 2769 2770 dev_priv->irq_mask = ~display_mask; 2771 2772 /* should always can generate irq */ 2773 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2774 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2775 I915_WRITE(DEIMR, dev_priv->irq_mask); 2776 I915_WRITE(DEIER, 2777 display_mask | 2778 DE_PIPEC_VBLANK_IVB | 2779 DE_PIPEB_VBLANK_IVB | 2780 DE_PIPEA_VBLANK_IVB); 2781 POSTING_READ(DEIER); 2782 2783 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2784 2785 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2786 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2787 2788 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | 2789 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2790 I915_WRITE(GTIER, gt_irqs); 2791 POSTING_READ(GTIER); 2792 2793 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2794 if (HAS_VEBOX(dev)) 2795 pm_irqs |= PM_VEBOX_USER_INTERRUPT | 2796 PM_VEBOX_CS_ERROR_INTERRUPT; 2797 2798 /* Our enable/disable rps functions may touch these registers so 2799 * make sure to set a known state for only the non-RPS bits. 2800 * The RMW is extra paranoia since this should be called after being set 2801 * to a known state in preinstall. 2802 * */ 2803 I915_WRITE(GEN6_PMIMR, 2804 (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs); 2805 I915_WRITE(GEN6_PMIER, 2806 (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs); 2807 POSTING_READ(GEN6_PMIER); 2808 2809 ibx_irq_postinstall(dev); 2810 2811 return 0; 2812 } 2813 2814 static int valleyview_irq_postinstall(struct drm_device *dev) 2815 { 2816 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2817 u32 gt_irqs; 2818 u32 enable_mask; 2819 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2820 2821 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2822 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2823 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2824 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2825 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2826 2827 /* 2828 *Leave vblank interrupts masked initially. enable/disable will 2829 * toggle them based on usage. 
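 * (valleyview_enable_vblank()/valleyview_disable_vblank() above do the
 * toggling, clearing or setting the per-pipe vblank bits in VLV_IMR as
 * drm core requests vblank interrupts.)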
2830 */ 2831 dev_priv->irq_mask = (~enable_mask) | 2832 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2833 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2834 2835 I915_WRITE(PORT_HOTPLUG_EN, 0); 2836 POSTING_READ(PORT_HOTPLUG_EN); 2837 2838 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2839 I915_WRITE(VLV_IER, enable_mask); 2840 I915_WRITE(VLV_IIR, 0xffffffff); 2841 I915_WRITE(PIPESTAT(0), 0xffff); 2842 I915_WRITE(PIPESTAT(1), 0xffff); 2843 POSTING_READ(VLV_IER); 2844 2845 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2846 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2847 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2848 2849 I915_WRITE(VLV_IIR, 0xffffffff); 2850 I915_WRITE(VLV_IIR, 0xffffffff); 2851 2852 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2853 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2854 2855 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | 2856 GT_BLT_USER_INTERRUPT; 2857 I915_WRITE(GTIER, gt_irqs); 2858 POSTING_READ(GTIER); 2859 2860 /* ack & enable invalid PTE error interrupts */ 2861 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2862 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2863 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2864 #endif 2865 2866 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2867 2868 return 0; 2869 } 2870 2871 static void valleyview_irq_uninstall(struct drm_device *dev) 2872 { 2873 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2874 int pipe; 2875 2876 if (!dev_priv) 2877 return; 2878 2879 del_timer_sync(&dev_priv->hotplug_reenable_timer); 2880 2881 for_each_pipe(pipe) 2882 I915_WRITE(PIPESTAT(pipe), 0xffff); 2883 2884 I915_WRITE(HWSTAM, 0xffffffff); 2885 I915_WRITE(PORT_HOTPLUG_EN, 0); 2886 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2887 for_each_pipe(pipe) 2888 I915_WRITE(PIPESTAT(pipe), 0xffff); 2889 I915_WRITE(VLV_IIR, 0xffffffff); 2890 I915_WRITE(VLV_IMR, 0xffffffff); 2891 I915_WRITE(VLV_IER, 0x0); 2892 POSTING_READ(VLV_IER); 2893 } 2894 2895 static void ironlake_irq_uninstall(struct drm_device *dev) 2896 { 2897 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2898 2899 if (!dev_priv) 2900 return; 2901 2902 del_timer_sync(&dev_priv->hotplug_reenable_timer); 2903 2904 I915_WRITE(HWSTAM, 0xffffffff); 2905 2906 I915_WRITE(DEIMR, 0xffffffff); 2907 I915_WRITE(DEIER, 0x0); 2908 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2909 if (IS_GEN7(dev)) 2910 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2911 2912 I915_WRITE(GTIMR, 0xffffffff); 2913 I915_WRITE(GTIER, 0x0); 2914 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2915 2916 if (HAS_PCH_NOP(dev)) 2917 return; 2918 2919 I915_WRITE(SDEIMR, 0xffffffff); 2920 I915_WRITE(SDEIER, 0x0); 2921 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2922 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 2923 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2924 } 2925 2926 static void i8xx_irq_preinstall(struct drm_device * dev) 2927 { 2928 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2929 int pipe; 2930 2931 atomic_set(&dev_priv->irq_received, 0); 2932 2933 for_each_pipe(pipe) 2934 I915_WRITE(PIPESTAT(pipe), 0); 2935 I915_WRITE16(IMR, 0xffff); 2936 I915_WRITE16(IER, 0x0); 2937 POSTING_READ16(IER); 2938 } 2939 2940 static int i8xx_irq_postinstall(struct drm_device *dev) 2941 { 2942 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2943 2944 I915_WRITE16(EMR, 2945 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2946 2947 /* Unmask the interrupts that we always want on. 
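 * EMR above selects which error conditions may surface as
 * I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT, while IMR and IER below
 * decide which IIR bits can actually raise an interrupt; everything not
 * listed stays masked off.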
*/ 2948 dev_priv->irq_mask = 2949 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2950 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2951 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2952 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2953 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2954 I915_WRITE16(IMR, dev_priv->irq_mask); 2955 2956 I915_WRITE16(IER, 2957 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2958 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2959 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2960 I915_USER_INTERRUPT); 2961 POSTING_READ16(IER); 2962 2963 return 0; 2964 } 2965 2966 /* 2967 * Returns true when a page flip has completed. 2968 */ 2969 static bool i8xx_handle_vblank(struct drm_device *dev, 2970 int pipe, u16 iir) 2971 { 2972 drm_i915_private_t *dev_priv = dev->dev_private; 2973 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 2974 2975 if (!drm_handle_vblank(dev, pipe)) 2976 return false; 2977 2978 if ((iir & flip_pending) == 0) 2979 return false; 2980 2981 intel_prepare_page_flip(dev, pipe); 2982 2983 /* We detect FlipDone by looking for the change in PendingFlip from '1' 2984 * to '0' on the following vblank, i.e. IIR has the Pendingflip 2985 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 2986 * the flip is completed (no longer pending). Since this doesn't raise 2987 * an interrupt per se, we watch for the change at vblank. 2988 */ 2989 if (I915_READ16(ISR) & flip_pending) 2990 return false; 2991 2992 intel_finish_page_flip(dev, pipe); 2993 2994 return true; 2995 } 2996 2997 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2998 { 2999 struct drm_device *dev = (struct drm_device *) arg; 3000 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3001 u16 iir, new_iir; 3002 u32 pipe_stats[2]; 3003 unsigned long irqflags; 3004 int irq_received; 3005 int pipe; 3006 u16 flip_mask = 3007 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3008 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3009 3010 atomic_inc(&dev_priv->irq_received); 3011 3012 iir = I915_READ16(IIR); 3013 if (iir == 0) 3014 return IRQ_NONE; 3015 3016 while (iir & ~flip_mask) { 3017 /* Can't rely on pipestat interrupt bit in iir as it might 3018 * have been cleared after the pipestat interrupt was received. 3019 * It doesn't set the bit in iir again, but it still produces 3020 * interrupts (for non-MSI). 
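 * That is why PIPESTAT is read and acked here under irq_lock, and the
 * cached pipe_stats[] values, rather than fresh register reads, are used
 * for the vblank and flip handling further down.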
3021 */ 3022 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3023 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3024 i915_handle_error(dev, false); 3025 3026 for_each_pipe(pipe) { 3027 int reg = PIPESTAT(pipe); 3028 pipe_stats[pipe] = I915_READ(reg); 3029 3030 /* 3031 * Clear the PIPE*STAT regs before the IIR 3032 */ 3033 if (pipe_stats[pipe] & 0x8000ffff) { 3034 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3035 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3036 pipe_name(pipe)); 3037 I915_WRITE(reg, pipe_stats[pipe]); 3038 irq_received = 1; 3039 } 3040 } 3041 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3042 3043 I915_WRITE16(IIR, iir & ~flip_mask); 3044 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3045 3046 i915_update_dri1_breadcrumb(dev); 3047 3048 if (iir & I915_USER_INTERRUPT) 3049 notify_ring(dev, &dev_priv->ring[RCS]); 3050 3051 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 3052 i8xx_handle_vblank(dev, 0, iir)) 3053 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 3054 3055 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 3056 i8xx_handle_vblank(dev, 1, iir)) 3057 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 3058 3059 iir = new_iir; 3060 } 3061 3062 return IRQ_HANDLED; 3063 } 3064 3065 static void i8xx_irq_uninstall(struct drm_device * dev) 3066 { 3067 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3068 int pipe; 3069 3070 for_each_pipe(pipe) { 3071 /* Clear enable bits; then clear status bits */ 3072 I915_WRITE(PIPESTAT(pipe), 0); 3073 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3074 } 3075 I915_WRITE16(IMR, 0xffff); 3076 I915_WRITE16(IER, 0x0); 3077 I915_WRITE16(IIR, I915_READ16(IIR)); 3078 } 3079 3080 static void i915_irq_preinstall(struct drm_device * dev) 3081 { 3082 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3083 int pipe; 3084 3085 atomic_set(&dev_priv->irq_received, 0); 3086 3087 if (I915_HAS_HOTPLUG(dev)) { 3088 I915_WRITE(PORT_HOTPLUG_EN, 0); 3089 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3090 } 3091 3092 I915_WRITE16(HWSTAM, 0xeffe); 3093 for_each_pipe(pipe) 3094 I915_WRITE(PIPESTAT(pipe), 0); 3095 I915_WRITE(IMR, 0xffffffff); 3096 I915_WRITE(IER, 0x0); 3097 POSTING_READ(IER); 3098 } 3099 3100 static int i915_irq_postinstall(struct drm_device *dev) 3101 { 3102 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3103 u32 enable_mask; 3104 3105 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3106 3107 /* Unmask the interrupts that we always want on. */ 3108 dev_priv->irq_mask = 3109 ~(I915_ASLE_INTERRUPT | 3110 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3111 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3112 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3113 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3114 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3115 3116 enable_mask = 3117 I915_ASLE_INTERRUPT | 3118 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3119 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3120 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3121 I915_USER_INTERRUPT; 3122 3123 if (I915_HAS_HOTPLUG(dev)) { 3124 I915_WRITE(PORT_HOTPLUG_EN, 0); 3125 POSTING_READ(PORT_HOTPLUG_EN); 3126 3127 /* Enable in IER... 
*/ 3128 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3129 /* and unmask in IMR */ 3130 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3131 } 3132 3133 I915_WRITE(IMR, dev_priv->irq_mask); 3134 I915_WRITE(IER, enable_mask); 3135 POSTING_READ(IER); 3136 3137 i915_enable_asle_pipestat(dev); 3138 3139 return 0; 3140 } 3141 3142 /* 3143 * Returns true when a page flip has completed. 3144 */ 3145 static bool i915_handle_vblank(struct drm_device *dev, 3146 int plane, int pipe, u32 iir) 3147 { 3148 drm_i915_private_t *dev_priv = dev->dev_private; 3149 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3150 3151 if (!drm_handle_vblank(dev, pipe)) 3152 return false; 3153 3154 if ((iir & flip_pending) == 0) 3155 return false; 3156 3157 intel_prepare_page_flip(dev, plane); 3158 3159 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3160 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3161 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3162 * the flip is completed (no longer pending). Since this doesn't raise 3163 * an interrupt per se, we watch for the change at vblank. 3164 */ 3165 if (I915_READ(ISR) & flip_pending) 3166 return false; 3167 3168 intel_finish_page_flip(dev, pipe); 3169 3170 return true; 3171 } 3172 3173 static irqreturn_t i915_irq_handler(int irq, void *arg) 3174 { 3175 struct drm_device *dev = (struct drm_device *) arg; 3176 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3177 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3178 unsigned long irqflags; 3179 u32 flip_mask = 3180 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3181 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3182 int pipe, ret = IRQ_NONE; 3183 3184 atomic_inc(&dev_priv->irq_received); 3185 3186 iir = I915_READ(IIR); 3187 do { 3188 bool irq_received = (iir & ~flip_mask) != 0; 3189 bool blc_event = false; 3190 3191 /* Can't rely on pipestat interrupt bit in iir as it might 3192 * have been cleared after the pipestat interrupt was received. 3193 * It doesn't set the bit in iir again, but it still produces 3194 * interrupts (for non-MSI). 3195 */ 3196 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3197 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3198 i915_handle_error(dev, false); 3199 3200 for_each_pipe(pipe) { 3201 int reg = PIPESTAT(pipe); 3202 pipe_stats[pipe] = I915_READ(reg); 3203 3204 /* Clear the PIPE*STAT regs before the IIR */ 3205 if (pipe_stats[pipe] & 0x8000ffff) { 3206 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3207 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3208 pipe_name(pipe)); 3209 I915_WRITE(reg, pipe_stats[pipe]); 3210 irq_received = true; 3211 } 3212 } 3213 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3214 3215 if (!irq_received) 3216 break; 3217 3218 /* Consume port. 
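(Reading PORT_HOTPLUG_STAT and writing the value back acks the latched hotplug bits.)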
Then clear IIR or we'll miss events */ 3219 if ((I915_HAS_HOTPLUG(dev)) && 3220 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3221 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3222 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3223 3224 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3225 hotplug_status); 3226 3227 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3228 3229 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3230 POSTING_READ(PORT_HOTPLUG_STAT); 3231 } 3232 3233 I915_WRITE(IIR, iir & ~flip_mask); 3234 new_iir = I915_READ(IIR); /* Flush posted writes */ 3235 3236 if (iir & I915_USER_INTERRUPT) 3237 notify_ring(dev, &dev_priv->ring[RCS]); 3238 3239 for_each_pipe(pipe) { 3240 int plane = pipe; 3241 if (IS_MOBILE(dev)) 3242 plane = !plane; 3243 3244 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3245 i915_handle_vblank(dev, plane, pipe, iir)) 3246 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3247 3248 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3249 blc_event = true; 3250 } 3251 3252 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3253 intel_opregion_asle_intr(dev); 3254 3255 /* With MSI, interrupts are only generated when iir 3256 * transitions from zero to nonzero. If another bit got 3257 * set while we were handling the existing iir bits, then 3258 * we would never get another interrupt. 3259 * 3260 * This is fine on non-MSI as well, as if we hit this path 3261 * we avoid exiting the interrupt handler only to generate 3262 * another one. 3263 * 3264 * Note that for MSI this could cause a stray interrupt report 3265 * if an interrupt landed in the time between writing IIR and 3266 * the posting read. This should be rare enough to never 3267 * trigger the 99% of 100,000 interrupts test for disabling 3268 * stray interrupts. 3269 */ 3270 ret = IRQ_HANDLED; 3271 iir = new_iir; 3272 } while (iir & ~flip_mask); 3273 3274 i915_update_dri1_breadcrumb(dev); 3275 3276 return ret; 3277 } 3278 3279 static void i915_irq_uninstall(struct drm_device * dev) 3280 { 3281 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3282 int pipe; 3283 3284 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3285 3286 if (I915_HAS_HOTPLUG(dev)) { 3287 I915_WRITE(PORT_HOTPLUG_EN, 0); 3288 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3289 } 3290 3291 I915_WRITE16(HWSTAM, 0xffff); 3292 for_each_pipe(pipe) { 3293 /* Clear enable bits; then clear status bits */ 3294 I915_WRITE(PIPESTAT(pipe), 0); 3295 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3296 } 3297 I915_WRITE(IMR, 0xffffffff); 3298 I915_WRITE(IER, 0x0); 3299 3300 I915_WRITE(IIR, I915_READ(IIR)); 3301 } 3302 3303 static void i965_irq_preinstall(struct drm_device * dev) 3304 { 3305 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3306 int pipe; 3307 3308 atomic_set(&dev_priv->irq_received, 0); 3309 3310 I915_WRITE(PORT_HOTPLUG_EN, 0); 3311 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3312 3313 I915_WRITE(HWSTAM, 0xeffe); 3314 for_each_pipe(pipe) 3315 I915_WRITE(PIPESTAT(pipe), 0); 3316 I915_WRITE(IMR, 0xffffffff); 3317 I915_WRITE(IER, 0x0); 3318 POSTING_READ(IER); 3319 } 3320 3321 static int i965_irq_postinstall(struct drm_device *dev) 3322 { 3323 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3324 u32 enable_mask; 3325 u32 error_mask; 3326 3327 /* Unmask the interrupts that we always want on. 
*/ 3328 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3329 I915_DISPLAY_PORT_INTERRUPT | 3330 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3331 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3332 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3333 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3334 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3335 3336 enable_mask = ~dev_priv->irq_mask; 3337 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3338 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3339 enable_mask |= I915_USER_INTERRUPT; 3340 3341 if (IS_G4X(dev)) 3342 enable_mask |= I915_BSD_USER_INTERRUPT; 3343 3344 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 3345 3346 /* 3347 * Enable some error detection, note the instruction error mask 3348 * bit is reserved, so we leave it masked. 3349 */ 3350 if (IS_G4X(dev)) { 3351 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3352 GM45_ERROR_MEM_PRIV | 3353 GM45_ERROR_CP_PRIV | 3354 I915_ERROR_MEMORY_REFRESH); 3355 } else { 3356 error_mask = ~(I915_ERROR_PAGE_TABLE | 3357 I915_ERROR_MEMORY_REFRESH); 3358 } 3359 I915_WRITE(EMR, error_mask); 3360 3361 I915_WRITE(IMR, dev_priv->irq_mask); 3362 I915_WRITE(IER, enable_mask); 3363 POSTING_READ(IER); 3364 3365 I915_WRITE(PORT_HOTPLUG_EN, 0); 3366 POSTING_READ(PORT_HOTPLUG_EN); 3367 3368 i915_enable_asle_pipestat(dev); 3369 3370 return 0; 3371 } 3372 3373 static void i915_hpd_irq_setup(struct drm_device *dev) 3374 { 3375 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3376 struct drm_mode_config *mode_config = &dev->mode_config; 3377 struct intel_encoder *intel_encoder; 3378 u32 hotplug_en; 3379 3380 assert_spin_locked(&dev_priv->irq_lock); 3381 3382 if (I915_HAS_HOTPLUG(dev)) { 3383 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3384 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3385 /* Note HDMI and DP share hotplug bits */ 3386 /* enable bits are the same for all generations */ 3387 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3388 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3389 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3390 /* Programming the CRT detection parameters tends 3391 to generate a spurious hotplug event about three 3392 seconds later. So just do it once. 3393 */ 3394 if (IS_G4X(dev)) 3395 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 3396 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 3397 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3398 3399 /* Ignore TV since it's buggy */ 3400 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 3401 } 3402 } 3403 3404 static irqreturn_t i965_irq_handler(int irq, void *arg) 3405 { 3406 struct drm_device *dev = (struct drm_device *) arg; 3407 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3408 u32 iir, new_iir; 3409 u32 pipe_stats[I915_MAX_PIPES]; 3410 unsigned long irqflags; 3411 int irq_received; 3412 int ret = IRQ_NONE, pipe; 3413 u32 flip_mask = 3414 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3415 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3416 3417 atomic_inc(&dev_priv->irq_received); 3418 3419 iir = I915_READ(IIR); 3420 3421 for (;;) { 3422 bool blc_event = false; 3423 3424 irq_received = (iir & ~flip_mask) != 0; 3425 3426 /* Can't rely on pipestat interrupt bit in iir as it might 3427 * have been cleared after the pipestat interrupt was received. 3428 * It doesn't set the bit in iir again, but it still produces 3429 * interrupts (for non-MSI). 
3430 */ 3431 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3432 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3433 i915_handle_error(dev, false); 3434 3435 for_each_pipe(pipe) { 3436 int reg = PIPESTAT(pipe); 3437 pipe_stats[pipe] = I915_READ(reg); 3438 3439 /* 3440 * Clear the PIPE*STAT regs before the IIR 3441 */ 3442 if (pipe_stats[pipe] & 0x8000ffff) { 3443 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3444 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3445 pipe_name(pipe)); 3446 I915_WRITE(reg, pipe_stats[pipe]); 3447 irq_received = 1; 3448 } 3449 } 3450 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3451 3452 if (!irq_received) 3453 break; 3454 3455 ret = IRQ_HANDLED; 3456 3457 /* Consume port. Then clear IIR or we'll miss events */ 3458 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 3459 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3460 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 3461 HOTPLUG_INT_STATUS_G4X : 3462 HOTPLUG_INT_STATUS_I915); 3463 3464 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3465 hotplug_status); 3466 3467 intel_hpd_irq_handler(dev, hotplug_trigger, 3468 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); 3469 3470 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3471 I915_READ(PORT_HOTPLUG_STAT); 3472 } 3473 3474 I915_WRITE(IIR, iir & ~flip_mask); 3475 new_iir = I915_READ(IIR); /* Flush posted writes */ 3476 3477 if (iir & I915_USER_INTERRUPT) 3478 notify_ring(dev, &dev_priv->ring[RCS]); 3479 if (iir & I915_BSD_USER_INTERRUPT) 3480 notify_ring(dev, &dev_priv->ring[VCS]); 3481 3482 for_each_pipe(pipe) { 3483 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 3484 i915_handle_vblank(dev, pipe, pipe, iir)) 3485 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 3486 3487 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3488 blc_event = true; 3489 } 3490 3491 3492 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3493 intel_opregion_asle_intr(dev); 3494 3495 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 3496 gmbus_irq_handler(dev); 3497 3498 /* With MSI, interrupts are only generated when iir 3499 * transitions from zero to nonzero. If another bit got 3500 * set while we were handling the existing iir bits, then 3501 * we would never get another interrupt. 3502 * 3503 * This is fine on non-MSI as well, as if we hit this path 3504 * we avoid exiting the interrupt handler only to generate 3505 * another one. 3506 * 3507 * Note that for MSI this could cause a stray interrupt report 3508 * if an interrupt landed in the time between writing IIR and 3509 * the posting read. This should be rare enough to never 3510 * trigger the 99% of 100,000 interrupts test for disabling 3511 * stray interrupts. 
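 * So we simply go around the loop again with the freshly read IIR value,
 * and only stop once no interesting bits remain set.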
3512 */ 3513 iir = new_iir; 3514 } 3515 3516 i915_update_dri1_breadcrumb(dev); 3517 3518 return ret; 3519 } 3520 3521 static void i965_irq_uninstall(struct drm_device * dev) 3522 { 3523 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3524 int pipe; 3525 3526 if (!dev_priv) 3527 return; 3528 3529 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3530 3531 I915_WRITE(PORT_HOTPLUG_EN, 0); 3532 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3533 3534 I915_WRITE(HWSTAM, 0xffffffff); 3535 for_each_pipe(pipe) 3536 I915_WRITE(PIPESTAT(pipe), 0); 3537 I915_WRITE(IMR, 0xffffffff); 3538 I915_WRITE(IER, 0x0); 3539 3540 for_each_pipe(pipe) 3541 I915_WRITE(PIPESTAT(pipe), 3542 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 3543 I915_WRITE(IIR, I915_READ(IIR)); 3544 } 3545 3546 static void i915_reenable_hotplug_timer_func(unsigned long data) 3547 { 3548 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3549 struct drm_device *dev = dev_priv->dev; 3550 struct drm_mode_config *mode_config = &dev->mode_config; 3551 unsigned long irqflags; 3552 int i; 3553 3554 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3555 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 3556 struct drm_connector *connector; 3557 3558 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 3559 continue; 3560 3561 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3562 3563 list_for_each_entry(connector, &mode_config->connector_list, head) { 3564 struct intel_connector *intel_connector = to_intel_connector(connector); 3565 3566 if (intel_connector->encoder->hpd_pin == i) { 3567 if (connector->polled != intel_connector->polled) 3568 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 3569 drm_get_connector_name(connector)); 3570 connector->polled = intel_connector->polled; 3571 if (!connector->polled) 3572 connector->polled = DRM_CONNECTOR_POLL_HPD; 3573 } 3574 } 3575 } 3576 if (dev_priv->display.hpd_irq_setup) 3577 dev_priv->display.hpd_irq_setup(dev); 3578 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3579 } 3580 3581 void intel_irq_init(struct drm_device *dev) 3582 { 3583 struct drm_i915_private *dev_priv = dev->dev_private; 3584 3585 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 3586 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 3587 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 3588 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 3589 3590 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 3591 i915_hangcheck_elapsed, 3592 (unsigned long) dev); 3593 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 3594 (unsigned long) dev_priv); 3595 3596 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 3597 3598 dev->driver->get_vblank_counter = i915_get_vblank_counter; 3599 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 3600 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3601 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3602 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3603 } 3604 3605 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3606 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3607 else 3608 dev->driver->get_vblank_timestamp = NULL; 3609 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 3610 3611 if (IS_VALLEYVIEW(dev)) { 3612 dev->driver->irq_handler = valleyview_irq_handler; 3613 dev->driver->irq_preinstall = valleyview_irq_preinstall; 3614 dev->driver->irq_postinstall = 
valleyview_irq_postinstall; 3615 dev->driver->irq_uninstall = valleyview_irq_uninstall; 3616 dev->driver->enable_vblank = valleyview_enable_vblank; 3617 dev->driver->disable_vblank = valleyview_disable_vblank; 3618 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3619 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 3620 /* Share uninstall handlers with ILK/SNB */ 3621 dev->driver->irq_handler = ivybridge_irq_handler; 3622 dev->driver->irq_preinstall = ivybridge_irq_preinstall; 3623 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 3624 dev->driver->irq_uninstall = ironlake_irq_uninstall; 3625 dev->driver->enable_vblank = ivybridge_enable_vblank; 3626 dev->driver->disable_vblank = ivybridge_disable_vblank; 3627 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3628 } else if (HAS_PCH_SPLIT(dev)) { 3629 dev->driver->irq_handler = ironlake_irq_handler; 3630 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3631 dev->driver->irq_postinstall = ironlake_irq_postinstall; 3632 dev->driver->irq_uninstall = ironlake_irq_uninstall; 3633 dev->driver->enable_vblank = ironlake_enable_vblank; 3634 dev->driver->disable_vblank = ironlake_disable_vblank; 3635 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3636 } else { 3637 if (INTEL_INFO(dev)->gen == 2) { 3638 dev->driver->irq_preinstall = i8xx_irq_preinstall; 3639 dev->driver->irq_postinstall = i8xx_irq_postinstall; 3640 dev->driver->irq_handler = i8xx_irq_handler; 3641 dev->driver->irq_uninstall = i8xx_irq_uninstall; 3642 } else if (INTEL_INFO(dev)->gen == 3) { 3643 dev->driver->irq_preinstall = i915_irq_preinstall; 3644 dev->driver->irq_postinstall = i915_irq_postinstall; 3645 dev->driver->irq_uninstall = i915_irq_uninstall; 3646 dev->driver->irq_handler = i915_irq_handler; 3647 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3648 } else { 3649 dev->driver->irq_preinstall = i965_irq_preinstall; 3650 dev->driver->irq_postinstall = i965_irq_postinstall; 3651 dev->driver->irq_uninstall = i965_irq_uninstall; 3652 dev->driver->irq_handler = i965_irq_handler; 3653 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3654 } 3655 dev->driver->enable_vblank = i915_enable_vblank; 3656 dev->driver->disable_vblank = i915_disable_vblank; 3657 } 3658 } 3659 3660 void intel_hpd_init(struct drm_device *dev) 3661 { 3662 struct drm_i915_private *dev_priv = dev->dev_private; 3663 struct drm_mode_config *mode_config = &dev->mode_config; 3664 struct drm_connector *connector; 3665 unsigned long irqflags; 3666 int i; 3667 3668 for (i = 1; i < HPD_NUM_PINS; i++) { 3669 dev_priv->hpd_stats[i].hpd_cnt = 0; 3670 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3671 } 3672 list_for_each_entry(connector, &mode_config->connector_list, head) { 3673 struct intel_connector *intel_connector = to_intel_connector(connector); 3674 connector->polled = intel_connector->polled; 3675 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3676 connector->polled = DRM_CONNECTOR_POLL_HPD; 3677 } 3678 3679 /* Interrupt setup is already guaranteed to be single-threaded, this is 3680 * just to make the assert_spin_locked checks happy. */ 3681 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3682 if (dev_priv->display.hpd_irq_setup) 3683 dev_priv->display.hpd_irq_setup(dev); 3684 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3685 } 3686
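/*
 * Illustrative call flow (a rough sketch of how the hooks installed above
 * are exercised; the actual call sites live in the drm core and in the
 * rest of the driver, not in this file):
 *
 *	intel_irq_init(dev);		// pick per-platform irq/vblank hooks
 *	drm_irq_install(dev);		// -> irq_preinstall hook
 *					// -> request_irq(..., driver->irq_handler, ...)
 *					// -> irq_postinstall hook
 *	intel_hpd_init(dev);		// arm hotplug detection
 *	...
 *	drm_vblank_get(dev, pipe);	// -> enable_vblank hook
 *	drm_vblank_put(dev, pipe);	// -> disable_vblank hook (deferred)
 *	...
 *	drm_irq_uninstall(dev);		// -> irq_uninstall hook
 */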