1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <drm/drmP.h> 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include "intel_drv.h" 38 39 /* For display hotplug interrupt */ 40 static void 41 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 42 { 43 if ((dev_priv->irq_mask & mask) != 0) { 44 dev_priv->irq_mask &= ~mask; 45 I915_WRITE(DEIMR, dev_priv->irq_mask); 46 POSTING_READ(DEIMR); 47 } 48 } 49 50 static inline void 51 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 52 { 53 if ((dev_priv->irq_mask & mask) != mask) { 54 dev_priv->irq_mask |= mask; 55 I915_WRITE(DEIMR, dev_priv->irq_mask); 56 POSTING_READ(DEIMR); 57 } 58 } 59 60 void 61 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 62 { 63 if ((dev_priv->pipestat[pipe] & mask) != mask) { 64 u32 reg = PIPESTAT(pipe); 65 66 dev_priv->pipestat[pipe] |= mask; 67 /* Enable the interrupt, clear any pending status */ 68 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); 69 POSTING_READ(reg); 70 } 71 } 72 73 void 74 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 75 { 76 if ((dev_priv->pipestat[pipe] & mask) != 0) { 77 u32 reg = PIPESTAT(pipe); 78 79 dev_priv->pipestat[pipe] &= ~mask; 80 I915_WRITE(reg, dev_priv->pipestat[pipe]); 81 POSTING_READ(reg); 82 } 83 } 84 85 /** 86 * intel_enable_asle - enable ASLE interrupt for OpRegion 87 */ 88 void intel_enable_asle(struct drm_device *dev) 89 { 90 drm_i915_private_t *dev_priv = dev->dev_private; 91 unsigned long irqflags; 92 93 /* FIXME: opregion/asle for VLV */ 94 if (IS_VALLEYVIEW(dev)) 95 return; 96 97 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 98 99 if (HAS_PCH_SPLIT(dev)) 100 ironlake_enable_display_irq(dev_priv, DE_GSE); 101 else { 102 i915_enable_pipestat(dev_priv, 1, 103 PIPE_LEGACY_BLC_EVENT_ENABLE); 104 if (INTEL_INFO(dev)->gen >= 4) 105 i915_enable_pipestat(dev_priv, 0, 106 PIPE_LEGACY_BLC_EVENT_ENABLE); 107 } 108 109 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 110 } 111 112 /** 113 * i915_pipe_enabled - check if a pipe is enabled 114 * @dev: DRM device 115 * @pipe: pipe to check 116 * 117 * Reading certain registers when the pipe is disabled can hang the chip. 118 * Use this routine to make sure the PLL is running and the pipe is active 119 * before reading such registers if unsure. 120 */ 121 static int 122 i915_pipe_enabled(struct drm_device *dev, int pipe) 123 { 124 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 126 pipe); 127 128 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; 129 } 130 131 /* Called from drm generic code, passed a 'crtc', which 132 * we use as a pipe index 133 */ 134 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 135 { 136 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 137 unsigned long high_frame; 138 unsigned long low_frame; 139 u32 high1, high2, low; 140 141 if (!i915_pipe_enabled(dev, pipe)) { 142 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 143 "pipe %c\n", pipe_name(pipe)); 144 return 0; 145 } 146 147 high_frame = PIPEFRAME(pipe); 148 low_frame = PIPEFRAMEPIXEL(pipe); 149 150 /* 151 * High & low register fields aren't synchronized, so make sure 152 * we get a low value that's stable across two reads of the high 153 * register. 154 */ 155 do { 156 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 157 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; 158 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 159 } while (high1 != high2); 160 161 high1 >>= PIPE_FRAME_HIGH_SHIFT; 162 low >>= PIPE_FRAME_LOW_SHIFT; 163 return (high1 << 8) | low; 164 } 165 166 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 167 { 168 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 169 int reg = PIPE_FRMCOUNT_GM45(pipe); 170 171 if (!i915_pipe_enabled(dev, pipe)) { 172 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 173 "pipe %c\n", pipe_name(pipe)); 174 return 0; 175 } 176 177 return I915_READ(reg); 178 } 179 180 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 181 int *vpos, int *hpos) 182 { 183 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 184 u32 vbl = 0, position = 0; 185 int vbl_start, vbl_end, htotal, vtotal; 186 bool in_vbl = true; 187 int ret = 0; 188 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 189 pipe); 190 191 if (!i915_pipe_enabled(dev, pipe)) { 192 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 193 "pipe %c\n", pipe_name(pipe)); 194 return 0; 195 } 196 197 /* Get vtotal. */ 198 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 199 200 if (INTEL_INFO(dev)->gen >= 4) { 201 /* No obvious pixelcount register. Only query vertical 202 * scanout position from Display scan line register. 203 */ 204 position = I915_READ(PIPEDSL(pipe)); 205 206 /* Decode into vertical scanout position. Don't have 207 * horizontal scanout position. 208 */ 209 *vpos = position & 0x1fff; 210 *hpos = 0; 211 } else { 212 /* Have access to pixelcount since start of frame. 213 * We can split this into vertical and horizontal 214 * scanout position. 215 */ 216 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 217 218 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 219 *vpos = position / htotal; 220 *hpos = position - (*vpos * htotal); 221 } 222 223 /* Query vblank area. */ 224 vbl = I915_READ(VBLANK(cpu_transcoder)); 225 226 /* Test position against vblank region. */ 227 vbl_start = vbl & 0x1fff; 228 vbl_end = (vbl >> 16) & 0x1fff; 229 230 if ((*vpos < vbl_start) || (*vpos > vbl_end)) 231 in_vbl = false; 232 233 /* Inside "upper part" of vblank area? Apply corrective offset: */ 234 if (in_vbl && (*vpos >= vbl_start)) 235 *vpos = *vpos - vtotal; 236 237 /* Readouts valid? */ 238 if (vbl > 0) 239 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 240 241 /* In vblank? */ 242 if (in_vbl) 243 ret |= DRM_SCANOUTPOS_INVBL; 244 245 return ret; 246 } 247 248 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 249 int *max_error, 250 struct timeval *vblank_time, 251 unsigned flags) 252 { 253 struct drm_i915_private *dev_priv = dev->dev_private; 254 struct drm_crtc *crtc; 255 256 if (pipe < 0 || pipe >= dev_priv->num_pipe) { 257 DRM_ERROR("Invalid crtc %d\n", pipe); 258 return -EINVAL; 259 } 260 261 /* Get drm_crtc to timestamp: */ 262 crtc = intel_get_crtc_for_pipe(dev, pipe); 263 if (crtc == NULL) { 264 DRM_ERROR("Invalid crtc %d\n", pipe); 265 return -EINVAL; 266 } 267 268 if (!crtc->enabled) { 269 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 270 return -EBUSY; 271 } 272 273 /* Helper routine in DRM core does all the work: */ 274 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 275 vblank_time, flags, 276 crtc); 277 } 278 279 /* 280 * Handle hotplug events outside the interrupt handler proper. 281 */ 282 static void i915_hotplug_work_func(struct work_struct *work) 283 { 284 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 285 hotplug_work); 286 struct drm_device *dev = dev_priv->dev; 287 struct drm_mode_config *mode_config = &dev->mode_config; 288 struct intel_encoder *encoder; 289 290 mutex_lock(&mode_config->mutex); 291 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 292 293 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 294 if (encoder->hot_plug) 295 encoder->hot_plug(encoder); 296 297 mutex_unlock(&mode_config->mutex); 298 299 /* Just fire off a uevent and let userspace tell us what to do */ 300 drm_helper_hpd_irq_event(dev); 301 } 302 303 /* defined intel_pm.c */ 304 extern spinlock_t mchdev_lock; 305 306 static void ironlake_handle_rps_change(struct drm_device *dev) 307 { 308 drm_i915_private_t *dev_priv = dev->dev_private; 309 u32 busy_up, busy_down, max_avg, min_avg; 310 u8 new_delay; 311 unsigned long flags; 312 313 spin_lock_irqsave(&mchdev_lock, flags); 314 315 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 316 317 new_delay = dev_priv->ips.cur_delay; 318 319 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 320 busy_up = I915_READ(RCPREVBSYTUPAVG); 321 busy_down = I915_READ(RCPREVBSYTDNAVG); 322 max_avg = I915_READ(RCBMAXAVG); 323 min_avg = I915_READ(RCBMINAVG); 324 325 /* Handle RCS change request from hw */ 326 if (busy_up > max_avg) { 327 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 328 new_delay = dev_priv->ips.cur_delay - 1; 329 if (new_delay < dev_priv->ips.max_delay) 330 new_delay = dev_priv->ips.max_delay; 331 } else if (busy_down < min_avg) { 332 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 333 new_delay = dev_priv->ips.cur_delay + 1; 334 if (new_delay > dev_priv->ips.min_delay) 335 new_delay = dev_priv->ips.min_delay; 336 } 337 338 if (ironlake_set_drps(dev, new_delay)) 339 dev_priv->ips.cur_delay = new_delay; 340 341 spin_unlock_irqrestore(&mchdev_lock, flags); 342 343 return; 344 } 345 346 static void notify_ring(struct drm_device *dev, 347 struct intel_ring_buffer *ring) 348 { 349 struct drm_i915_private *dev_priv = dev->dev_private; 350 351 if (ring->obj == NULL) 352 return; 353 354 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 355 356 wake_up_all(&ring->irq_queue); 357 if (i915_enable_hangcheck) { 358 dev_priv->hangcheck_count = 0; 359 mod_timer(&dev_priv->hangcheck_timer, 360 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 361 } 362 } 363 364 static void gen6_pm_rps_work(struct work_struct *work) 365 { 366 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 367 rps.work); 368 u32 pm_iir, pm_imr; 369 u8 new_delay; 370 371 spin_lock_irq(&dev_priv->rps.lock); 372 pm_iir = dev_priv->rps.pm_iir; 373 dev_priv->rps.pm_iir = 0; 374 pm_imr = I915_READ(GEN6_PMIMR); 375 I915_WRITE(GEN6_PMIMR, 0); 376 spin_unlock_irq(&dev_priv->rps.lock); 377 378 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 379 return; 380 381 mutex_lock(&dev_priv->rps.hw_lock); 382 383 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 384 new_delay = dev_priv->rps.cur_delay + 1; 385 else 386 new_delay = dev_priv->rps.cur_delay - 1; 387 388 /* sysfs frequency interfaces may have snuck in while servicing the 389 * interrupt 390 */ 391 if (!(new_delay > dev_priv->rps.max_delay || 392 new_delay < dev_priv->rps.min_delay)) { 393 gen6_set_rps(dev_priv->dev, new_delay); 394 } 395 396 mutex_unlock(&dev_priv->rps.hw_lock); 397 } 398 399 400 /** 401 * ivybridge_parity_work - Workqueue called when a parity error interrupt 402 * occurred. 403 * @work: workqueue struct 404 * 405 * Doesn't actually do anything except notify userspace. As a consequence of 406 * this event, userspace should try to remap the bad rows since statistically 407 * it is likely the same row is more likely to go bad again. 408 */ 409 static void ivybridge_parity_work(struct work_struct *work) 410 { 411 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 412 l3_parity.error_work); 413 u32 error_status, row, bank, subbank; 414 char *parity_event[5]; 415 uint32_t misccpctl; 416 unsigned long flags; 417 418 /* We must turn off DOP level clock gating to access the L3 registers. 419 * In order to prevent a get/put style interface, acquire struct mutex 420 * any time we access those registers. 421 */ 422 mutex_lock(&dev_priv->dev->struct_mutex); 423 424 misccpctl = I915_READ(GEN7_MISCCPCTL); 425 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 426 POSTING_READ(GEN7_MISCCPCTL); 427 428 error_status = I915_READ(GEN7_L3CDERRST1); 429 row = GEN7_PARITY_ERROR_ROW(error_status); 430 bank = GEN7_PARITY_ERROR_BANK(error_status); 431 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 432 433 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 434 GEN7_L3CDERRST1_ENABLE); 435 POSTING_READ(GEN7_L3CDERRST1); 436 437 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 438 439 spin_lock_irqsave(&dev_priv->irq_lock, flags); 440 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 441 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 442 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 443 444 mutex_unlock(&dev_priv->dev->struct_mutex); 445 446 parity_event[0] = "L3_PARITY_ERROR=1"; 447 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 448 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 449 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 450 parity_event[4] = NULL; 451 452 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 453 KOBJ_CHANGE, parity_event); 454 455 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 456 row, bank, subbank); 457 458 kfree(parity_event[3]); 459 kfree(parity_event[2]); 460 kfree(parity_event[1]); 461 } 462 463 static void ivybridge_handle_parity_error(struct drm_device *dev) 464 { 465 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 466 unsigned long flags; 467 468 if (!HAS_L3_GPU_CACHE(dev)) 469 return; 470 471 spin_lock_irqsave(&dev_priv->irq_lock, flags); 472 dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 473 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 474 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 475 476 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 477 } 478 479 static void snb_gt_irq_handler(struct drm_device *dev, 480 struct drm_i915_private *dev_priv, 481 u32 gt_iir) 482 { 483 484 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | 485 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) 486 notify_ring(dev, &dev_priv->ring[RCS]); 487 if (gt_iir & GEN6_BSD_USER_INTERRUPT) 488 notify_ring(dev, &dev_priv->ring[VCS]); 489 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) 490 notify_ring(dev, &dev_priv->ring[BCS]); 491 492 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | 493 GT_GEN6_BSD_CS_ERROR_INTERRUPT | 494 GT_RENDER_CS_ERROR_INTERRUPT)) { 495 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 496 i915_handle_error(dev, false); 497 } 498 499 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) 500 ivybridge_handle_parity_error(dev); 501 } 502 503 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, 504 u32 pm_iir) 505 { 506 unsigned long flags; 507 508 /* 509 * IIR bits should never already be set because IMR should 510 * prevent an interrupt from being shown in IIR. The warning 511 * displays a case where we've unsafely cleared 512 * dev_priv->rps.pm_iir. Although missing an interrupt of the same 513 * type is not a problem, it displays a problem in the logic. 514 * 515 * The mask bit in IMR is cleared by dev_priv->rps.work. 516 */ 517 518 spin_lock_irqsave(&dev_priv->rps.lock, flags); 519 dev_priv->rps.pm_iir |= pm_iir; 520 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); 521 POSTING_READ(GEN6_PMIMR); 522 spin_unlock_irqrestore(&dev_priv->rps.lock, flags); 523 524 queue_work(dev_priv->wq, &dev_priv->rps.work); 525 } 526 527 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 528 { 529 struct drm_device *dev = (struct drm_device *) arg; 530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 531 u32 iir, gt_iir, pm_iir; 532 irqreturn_t ret = IRQ_NONE; 533 unsigned long irqflags; 534 int pipe; 535 u32 pipe_stats[I915_MAX_PIPES]; 536 bool blc_event; 537 538 atomic_inc(&dev_priv->irq_received); 539 540 while (true) { 541 iir = I915_READ(VLV_IIR); 542 gt_iir = I915_READ(GTIIR); 543 pm_iir = I915_READ(GEN6_PMIIR); 544 545 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 546 goto out; 547 548 ret = IRQ_HANDLED; 549 550 snb_gt_irq_handler(dev, dev_priv, gt_iir); 551 552 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 553 for_each_pipe(pipe) { 554 int reg = PIPESTAT(pipe); 555 pipe_stats[pipe] = I915_READ(reg); 556 557 /* 558 * Clear the PIPE*STAT regs before the IIR 559 */ 560 if (pipe_stats[pipe] & 0x8000ffff) { 561 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 562 DRM_DEBUG_DRIVER("pipe %c underrun\n", 563 pipe_name(pipe)); 564 I915_WRITE(reg, pipe_stats[pipe]); 565 } 566 } 567 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 568 569 for_each_pipe(pipe) { 570 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 571 drm_handle_vblank(dev, pipe); 572 573 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 574 intel_prepare_page_flip(dev, pipe); 575 intel_finish_page_flip(dev, pipe); 576 } 577 } 578 579 /* Consume port. Then clear IIR or we'll miss events */ 580 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 581 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 582 583 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 584 hotplug_status); 585 if (hotplug_status & dev_priv->hotplug_supported_mask) 586 queue_work(dev_priv->wq, 587 &dev_priv->hotplug_work); 588 589 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 590 I915_READ(PORT_HOTPLUG_STAT); 591 } 592 593 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 594 blc_event = true; 595 596 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 597 gen6_queue_rps_work(dev_priv, pm_iir); 598 599 I915_WRITE(GTIIR, gt_iir); 600 I915_WRITE(GEN6_PMIIR, pm_iir); 601 I915_WRITE(VLV_IIR, iir); 602 } 603 604 out: 605 return ret; 606 } 607 608 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 609 { 610 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 611 int pipe; 612 613 if (pch_iir & SDE_HOTPLUG_MASK) 614 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 615 616 if (pch_iir & SDE_AUDIO_POWER_MASK) 617 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 618 (pch_iir & SDE_AUDIO_POWER_MASK) >> 619 SDE_AUDIO_POWER_SHIFT); 620 621 if (pch_iir & SDE_GMBUS) 622 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 623 624 if (pch_iir & SDE_AUDIO_HDCP_MASK) 625 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 626 627 if (pch_iir & SDE_AUDIO_TRANS_MASK) 628 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 629 630 if (pch_iir & SDE_POISON) 631 DRM_ERROR("PCH poison interrupt\n"); 632 633 if (pch_iir & SDE_FDI_MASK) 634 for_each_pipe(pipe) 635 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 636 pipe_name(pipe), 637 I915_READ(FDI_RX_IIR(pipe))); 638 639 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 640 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 641 642 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 643 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 644 645 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 646 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); 647 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 648 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 649 } 650 651 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 652 { 653 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 654 int pipe; 655 656 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 657 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 658 659 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) 660 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 661 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 662 SDE_AUDIO_POWER_SHIFT_CPT); 663 664 if (pch_iir & SDE_AUX_MASK_CPT) 665 DRM_DEBUG_DRIVER("AUX channel interrupt\n"); 666 667 if (pch_iir & SDE_GMBUS_CPT) 668 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 669 670 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 671 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 672 673 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 674 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 675 676 if (pch_iir & SDE_FDI_MASK_CPT) 677 for_each_pipe(pipe) 678 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 679 pipe_name(pipe), 680 I915_READ(FDI_RX_IIR(pipe))); 681 } 682 683 static irqreturn_t ivybridge_irq_handler(int irq, void *arg) 684 { 685 struct drm_device *dev = (struct drm_device *) arg; 686 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 687 u32 de_iir, gt_iir, de_ier, pm_iir; 688 irqreturn_t ret = IRQ_NONE; 689 int i; 690 691 atomic_inc(&dev_priv->irq_received); 692 693 /* disable master interrupt before clearing iir */ 694 de_ier = I915_READ(DEIER); 695 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 696 697 gt_iir = I915_READ(GTIIR); 698 if (gt_iir) { 699 snb_gt_irq_handler(dev, dev_priv, gt_iir); 700 I915_WRITE(GTIIR, gt_iir); 701 ret = IRQ_HANDLED; 702 } 703 704 de_iir = I915_READ(DEIIR); 705 if (de_iir) { 706 if (de_iir & DE_GSE_IVB) 707 intel_opregion_gse_intr(dev); 708 709 for (i = 0; i < 3; i++) { 710 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 711 drm_handle_vblank(dev, i); 712 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 713 intel_prepare_page_flip(dev, i); 714 intel_finish_page_flip_plane(dev, i); 715 } 716 } 717 718 /* check event from PCH */ 719 if (de_iir & DE_PCH_EVENT_IVB) { 720 u32 pch_iir = I915_READ(SDEIIR); 721 722 cpt_irq_handler(dev, pch_iir); 723 724 /* clear PCH hotplug event before clear CPU irq */ 725 I915_WRITE(SDEIIR, pch_iir); 726 } 727 728 I915_WRITE(DEIIR, de_iir); 729 ret = IRQ_HANDLED; 730 } 731 732 pm_iir = I915_READ(GEN6_PMIIR); 733 if (pm_iir) { 734 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 735 gen6_queue_rps_work(dev_priv, pm_iir); 736 I915_WRITE(GEN6_PMIIR, pm_iir); 737 ret = IRQ_HANDLED; 738 } 739 740 I915_WRITE(DEIER, de_ier); 741 POSTING_READ(DEIER); 742 743 return ret; 744 } 745 746 static void ilk_gt_irq_handler(struct drm_device *dev, 747 struct drm_i915_private *dev_priv, 748 u32 gt_iir) 749 { 750 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 751 notify_ring(dev, &dev_priv->ring[RCS]); 752 if (gt_iir & GT_BSD_USER_INTERRUPT) 753 notify_ring(dev, &dev_priv->ring[VCS]); 754 } 755 756 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 757 { 758 struct drm_device *dev = (struct drm_device *) arg; 759 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 760 int ret = IRQ_NONE; 761 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 762 763 atomic_inc(&dev_priv->irq_received); 764 765 /* disable master interrupt before clearing iir */ 766 de_ier = I915_READ(DEIER); 767 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 768 POSTING_READ(DEIER); 769 770 de_iir = I915_READ(DEIIR); 771 gt_iir = I915_READ(GTIIR); 772 pch_iir = I915_READ(SDEIIR); 773 pm_iir = I915_READ(GEN6_PMIIR); 774 775 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && 776 (!IS_GEN6(dev) || pm_iir == 0)) 777 goto done; 778 779 ret = IRQ_HANDLED; 780 781 if (IS_GEN5(dev)) 782 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 783 else 784 snb_gt_irq_handler(dev, dev_priv, gt_iir); 785 786 if (de_iir & DE_GSE) 787 intel_opregion_gse_intr(dev); 788 789 if (de_iir & DE_PIPEA_VBLANK) 790 drm_handle_vblank(dev, 0); 791 792 if (de_iir & DE_PIPEB_VBLANK) 793 drm_handle_vblank(dev, 1); 794 795 if (de_iir & DE_PLANEA_FLIP_DONE) { 796 intel_prepare_page_flip(dev, 0); 797 intel_finish_page_flip_plane(dev, 0); 798 } 799 800 if (de_iir & DE_PLANEB_FLIP_DONE) { 801 intel_prepare_page_flip(dev, 1); 802 intel_finish_page_flip_plane(dev, 1); 803 } 804 805 /* check event from PCH */ 806 if (de_iir & DE_PCH_EVENT) { 807 if (HAS_PCH_CPT(dev)) 808 cpt_irq_handler(dev, pch_iir); 809 else 810 ibx_irq_handler(dev, pch_iir); 811 } 812 813 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 814 ironlake_handle_rps_change(dev); 815 816 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 817 gen6_queue_rps_work(dev_priv, pm_iir); 818 819 /* should clear PCH hotplug event before clear CPU irq */ 820 I915_WRITE(SDEIIR, pch_iir); 821 I915_WRITE(GTIIR, gt_iir); 822 I915_WRITE(DEIIR, de_iir); 823 I915_WRITE(GEN6_PMIIR, pm_iir); 824 825 done: 826 I915_WRITE(DEIER, de_ier); 827 POSTING_READ(DEIER); 828 829 return ret; 830 } 831 832 /** 833 * i915_error_work_func - do process context error handling work 834 * @work: work struct 835 * 836 * Fire an error uevent so userspace can see that a hang or error 837 * was detected. 838 */ 839 static void i915_error_work_func(struct work_struct *work) 840 { 841 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 842 error_work); 843 struct drm_device *dev = dev_priv->dev; 844 char *error_event[] = { "ERROR=1", NULL }; 845 char *reset_event[] = { "RESET=1", NULL }; 846 char *reset_done_event[] = { "ERROR=0", NULL }; 847 848 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 849 850 if (atomic_read(&dev_priv->mm.wedged)) { 851 DRM_DEBUG_DRIVER("resetting chip\n"); 852 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 853 if (!i915_reset(dev)) { 854 atomic_set(&dev_priv->mm.wedged, 0); 855 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 856 } 857 complete_all(&dev_priv->error_completion); 858 } 859 } 860 861 /* NB: please notice the memset */ 862 static void i915_get_extra_instdone(struct drm_device *dev, 863 uint32_t *instdone) 864 { 865 struct drm_i915_private *dev_priv = dev->dev_private; 866 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 867 868 switch(INTEL_INFO(dev)->gen) { 869 case 2: 870 case 3: 871 instdone[0] = I915_READ(INSTDONE); 872 break; 873 case 4: 874 case 5: 875 case 6: 876 instdone[0] = I915_READ(INSTDONE_I965); 877 instdone[1] = I915_READ(INSTDONE1); 878 break; 879 default: 880 WARN_ONCE(1, "Unsupported platform\n"); 881 case 7: 882 instdone[0] = I915_READ(GEN7_INSTDONE_1); 883 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 884 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 885 instdone[3] = I915_READ(GEN7_ROW_INSTDONE); 886 break; 887 } 888 } 889 890 #ifdef CONFIG_DEBUG_FS 891 static struct drm_i915_error_object * 892 i915_error_object_create(struct drm_i915_private *dev_priv, 893 struct drm_i915_gem_object *src) 894 { 895 struct drm_i915_error_object *dst; 896 int i, count; 897 u32 reloc_offset; 898 899 if (src == NULL || src->pages == NULL) 900 return NULL; 901 902 count = src->base.size / PAGE_SIZE; 903 904 dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); 905 if (dst == NULL) 906 return NULL; 907 908 reloc_offset = src->gtt_offset; 909 for (i = 0; i < count; i++) { 910 unsigned long flags; 911 void *d; 912 913 d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 914 if (d == NULL) 915 goto unwind; 916 917 local_irq_save(flags); 918 if (reloc_offset < dev_priv->mm.gtt_mappable_end && 919 src->has_global_gtt_mapping) { 920 void __iomem *s; 921 922 /* Simply ignore tiling or any overlapping fence. 923 * It's part of the error state, and this hopefully 924 * captures what the GPU read. 925 */ 926 927 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 928 reloc_offset); 929 memcpy_fromio(d, s, PAGE_SIZE); 930 io_mapping_unmap_atomic(s); 931 } else { 932 struct page *page; 933 void *s; 934 935 page = i915_gem_object_get_page(src, i); 936 937 drm_clflush_pages(&page, 1); 938 939 s = kmap_atomic(page); 940 memcpy(d, s, PAGE_SIZE); 941 kunmap_atomic(s); 942 943 drm_clflush_pages(&page, 1); 944 } 945 local_irq_restore(flags); 946 947 dst->pages[i] = d; 948 949 reloc_offset += PAGE_SIZE; 950 } 951 dst->page_count = count; 952 dst->gtt_offset = src->gtt_offset; 953 954 return dst; 955 956 unwind: 957 while (i--) 958 kfree(dst->pages[i]); 959 kfree(dst); 960 return NULL; 961 } 962 963 static void 964 i915_error_object_free(struct drm_i915_error_object *obj) 965 { 966 int page; 967 968 if (obj == NULL) 969 return; 970 971 for (page = 0; page < obj->page_count; page++) 972 kfree(obj->pages[page]); 973 974 kfree(obj); 975 } 976 977 void 978 i915_error_state_free(struct kref *error_ref) 979 { 980 struct drm_i915_error_state *error = container_of(error_ref, 981 typeof(*error), ref); 982 int i; 983 984 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 985 i915_error_object_free(error->ring[i].batchbuffer); 986 i915_error_object_free(error->ring[i].ringbuffer); 987 kfree(error->ring[i].requests); 988 } 989 990 kfree(error->active_bo); 991 kfree(error->overlay); 992 kfree(error); 993 } 994 static void capture_bo(struct drm_i915_error_buffer *err, 995 struct drm_i915_gem_object *obj) 996 { 997 err->size = obj->base.size; 998 err->name = obj->base.name; 999 err->rseqno = obj->last_read_seqno; 1000 err->wseqno = obj->last_write_seqno; 1001 err->gtt_offset = obj->gtt_offset; 1002 err->read_domains = obj->base.read_domains; 1003 err->write_domain = obj->base.write_domain; 1004 err->fence_reg = obj->fence_reg; 1005 err->pinned = 0; 1006 if (obj->pin_count > 0) 1007 err->pinned = 1; 1008 if (obj->user_pin_count > 0) 1009 err->pinned = -1; 1010 err->tiling = obj->tiling_mode; 1011 err->dirty = obj->dirty; 1012 err->purgeable = obj->madv != I915_MADV_WILLNEED; 1013 err->ring = obj->ring ? obj->ring->id : -1; 1014 err->cache_level = obj->cache_level; 1015 } 1016 1017 static u32 capture_active_bo(struct drm_i915_error_buffer *err, 1018 int count, struct list_head *head) 1019 { 1020 struct drm_i915_gem_object *obj; 1021 int i = 0; 1022 1023 list_for_each_entry(obj, head, mm_list) { 1024 capture_bo(err++, obj); 1025 if (++i == count) 1026 break; 1027 } 1028 1029 return i; 1030 } 1031 1032 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 1033 int count, struct list_head *head) 1034 { 1035 struct drm_i915_gem_object *obj; 1036 int i = 0; 1037 1038 list_for_each_entry(obj, head, gtt_list) { 1039 if (obj->pin_count == 0) 1040 continue; 1041 1042 capture_bo(err++, obj); 1043 if (++i == count) 1044 break; 1045 } 1046 1047 return i; 1048 } 1049 1050 static void i915_gem_record_fences(struct drm_device *dev, 1051 struct drm_i915_error_state *error) 1052 { 1053 struct drm_i915_private *dev_priv = dev->dev_private; 1054 int i; 1055 1056 /* Fences */ 1057 switch (INTEL_INFO(dev)->gen) { 1058 case 7: 1059 case 6: 1060 for (i = 0; i < 16; i++) 1061 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 1062 break; 1063 case 5: 1064 case 4: 1065 for (i = 0; i < 16; i++) 1066 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 1067 break; 1068 case 3: 1069 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 1070 for (i = 0; i < 8; i++) 1071 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 1072 case 2: 1073 for (i = 0; i < 8; i++) 1074 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 1075 break; 1076 1077 } 1078 } 1079 1080 static struct drm_i915_error_object * 1081 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 1082 struct intel_ring_buffer *ring) 1083 { 1084 struct drm_i915_gem_object *obj; 1085 u32 seqno; 1086 1087 if (!ring->get_seqno) 1088 return NULL; 1089 1090 seqno = ring->get_seqno(ring, false); 1091 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1092 if (obj->ring != ring) 1093 continue; 1094 1095 if (i915_seqno_passed(seqno, obj->last_read_seqno)) 1096 continue; 1097 1098 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 1099 continue; 1100 1101 /* We need to copy these to an anonymous buffer as the simplest 1102 * method to avoid being overwritten by userspace. 1103 */ 1104 return i915_error_object_create(dev_priv, obj); 1105 } 1106 1107 return NULL; 1108 } 1109 1110 static void i915_record_ring_state(struct drm_device *dev, 1111 struct drm_i915_error_state *error, 1112 struct intel_ring_buffer *ring) 1113 { 1114 struct drm_i915_private *dev_priv = dev->dev_private; 1115 1116 if (INTEL_INFO(dev)->gen >= 6) { 1117 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 1118 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 1119 error->semaphore_mboxes[ring->id][0] 1120 = I915_READ(RING_SYNC_0(ring->mmio_base)); 1121 error->semaphore_mboxes[ring->id][1] 1122 = I915_READ(RING_SYNC_1(ring->mmio_base)); 1123 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 1124 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 1125 } 1126 1127 if (INTEL_INFO(dev)->gen >= 4) { 1128 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 1129 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 1130 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1131 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1132 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 1133 if (ring->id == RCS) 1134 error->bbaddr = I915_READ64(BB_ADDR); 1135 } else { 1136 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 1137 error->ipeir[ring->id] = I915_READ(IPEIR); 1138 error->ipehr[ring->id] = I915_READ(IPEHR); 1139 error->instdone[ring->id] = I915_READ(INSTDONE); 1140 } 1141 1142 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 1143 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1144 error->seqno[ring->id] = ring->get_seqno(ring, false); 1145 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1146 error->head[ring->id] = I915_READ_HEAD(ring); 1147 error->tail[ring->id] = I915_READ_TAIL(ring); 1148 1149 error->cpu_ring_head[ring->id] = ring->head; 1150 error->cpu_ring_tail[ring->id] = ring->tail; 1151 } 1152 1153 static void i915_gem_record_rings(struct drm_device *dev, 1154 struct drm_i915_error_state *error) 1155 { 1156 struct drm_i915_private *dev_priv = dev->dev_private; 1157 struct intel_ring_buffer *ring; 1158 struct drm_i915_gem_request *request; 1159 int i, count; 1160 1161 for_each_ring(ring, dev_priv, i) { 1162 i915_record_ring_state(dev, error, ring); 1163 1164 error->ring[i].batchbuffer = 1165 i915_error_first_batchbuffer(dev_priv, ring); 1166 1167 error->ring[i].ringbuffer = 1168 i915_error_object_create(dev_priv, ring->obj); 1169 1170 count = 0; 1171 list_for_each_entry(request, &ring->request_list, list) 1172 count++; 1173 1174 error->ring[i].num_requests = count; 1175 error->ring[i].requests = 1176 kmalloc(count*sizeof(struct drm_i915_error_request), 1177 GFP_ATOMIC); 1178 if (error->ring[i].requests == NULL) { 1179 error->ring[i].num_requests = 0; 1180 continue; 1181 } 1182 1183 count = 0; 1184 list_for_each_entry(request, &ring->request_list, list) { 1185 struct drm_i915_error_request *erq; 1186 1187 erq = &error->ring[i].requests[count++]; 1188 erq->seqno = request->seqno; 1189 erq->jiffies = request->emitted_jiffies; 1190 erq->tail = request->tail; 1191 } 1192 } 1193 } 1194 1195 /** 1196 * i915_capture_error_state - capture an error record for later analysis 1197 * @dev: drm device 1198 * 1199 * Should be called when an error is detected (either a hang or an error 1200 * interrupt) to capture error state from the time of the error. Fills 1201 * out a structure which becomes available in debugfs for user level tools 1202 * to pick up. 1203 */ 1204 static void i915_capture_error_state(struct drm_device *dev) 1205 { 1206 struct drm_i915_private *dev_priv = dev->dev_private; 1207 struct drm_i915_gem_object *obj; 1208 struct drm_i915_error_state *error; 1209 unsigned long flags; 1210 int i, pipe; 1211 1212 spin_lock_irqsave(&dev_priv->error_lock, flags); 1213 error = dev_priv->first_error; 1214 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1215 if (error) 1216 return; 1217 1218 /* Account for pipe specific data like PIPE*STAT */ 1219 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1220 if (!error) { 1221 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1222 return; 1223 } 1224 1225 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", 1226 dev->primary->index); 1227 1228 kref_init(&error->ref); 1229 error->eir = I915_READ(EIR); 1230 error->pgtbl_er = I915_READ(PGTBL_ER); 1231 error->ccid = I915_READ(CCID); 1232 1233 if (HAS_PCH_SPLIT(dev)) 1234 error->ier = I915_READ(DEIER) | I915_READ(GTIER); 1235 else if (IS_VALLEYVIEW(dev)) 1236 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); 1237 else if (IS_GEN2(dev)) 1238 error->ier = I915_READ16(IER); 1239 else 1240 error->ier = I915_READ(IER); 1241 1242 for_each_pipe(pipe) 1243 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1244 1245 if (INTEL_INFO(dev)->gen >= 6) { 1246 error->error = I915_READ(ERROR_GEN6); 1247 error->done_reg = I915_READ(DONE_REG); 1248 } 1249 1250 if (INTEL_INFO(dev)->gen == 7) 1251 error->err_int = I915_READ(GEN7_ERR_INT); 1252 1253 i915_get_extra_instdone(dev, error->extra_instdone); 1254 1255 i915_gem_record_fences(dev, error); 1256 i915_gem_record_rings(dev, error); 1257 1258 /* Record buffers on the active and pinned lists. */ 1259 error->active_bo = NULL; 1260 error->pinned_bo = NULL; 1261 1262 i = 0; 1263 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1264 i++; 1265 error->active_bo_count = i; 1266 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 1267 if (obj->pin_count) 1268 i++; 1269 error->pinned_bo_count = i - error->active_bo_count; 1270 1271 error->active_bo = NULL; 1272 error->pinned_bo = NULL; 1273 if (i) { 1274 error->active_bo = kmalloc(sizeof(*error->active_bo)*i, 1275 GFP_ATOMIC); 1276 if (error->active_bo) 1277 error->pinned_bo = 1278 error->active_bo + error->active_bo_count; 1279 } 1280 1281 if (error->active_bo) 1282 error->active_bo_count = 1283 capture_active_bo(error->active_bo, 1284 error->active_bo_count, 1285 &dev_priv->mm.active_list); 1286 1287 if (error->pinned_bo) 1288 error->pinned_bo_count = 1289 capture_pinned_bo(error->pinned_bo, 1290 error->pinned_bo_count, 1291 &dev_priv->mm.bound_list); 1292 1293 do_gettimeofday(&error->time); 1294 1295 error->overlay = intel_overlay_capture_error_state(dev); 1296 error->display = intel_display_capture_error_state(dev); 1297 1298 spin_lock_irqsave(&dev_priv->error_lock, flags); 1299 if (dev_priv->first_error == NULL) { 1300 dev_priv->first_error = error; 1301 error = NULL; 1302 } 1303 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1304 1305 if (error) 1306 i915_error_state_free(&error->ref); 1307 } 1308 1309 void i915_destroy_error_state(struct drm_device *dev) 1310 { 1311 struct drm_i915_private *dev_priv = dev->dev_private; 1312 struct drm_i915_error_state *error; 1313 unsigned long flags; 1314 1315 spin_lock_irqsave(&dev_priv->error_lock, flags); 1316 error = dev_priv->first_error; 1317 dev_priv->first_error = NULL; 1318 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1319 1320 if (error) 1321 kref_put(&error->ref, i915_error_state_free); 1322 } 1323 #else 1324 #define i915_capture_error_state(x) 1325 #endif 1326 1327 static void i915_report_and_clear_eir(struct drm_device *dev) 1328 { 1329 struct drm_i915_private *dev_priv = dev->dev_private; 1330 uint32_t instdone[I915_NUM_INSTDONE_REG]; 1331 u32 eir = I915_READ(EIR); 1332 int pipe, i; 1333 1334 if (!eir) 1335 return; 1336 1337 pr_err("render error detected, EIR: 0x%08x\n", eir); 1338 1339 i915_get_extra_instdone(dev, instdone); 1340 1341 if (IS_G4X(dev)) { 1342 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1343 u32 ipeir = I915_READ(IPEIR_I965); 1344 1345 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1346 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1347 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1348 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1349 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1350 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1351 I915_WRITE(IPEIR_I965, ipeir); 1352 POSTING_READ(IPEIR_I965); 1353 } 1354 if (eir & GM45_ERROR_PAGE_TABLE) { 1355 u32 pgtbl_err = I915_READ(PGTBL_ER); 1356 pr_err("page table error\n"); 1357 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1358 I915_WRITE(PGTBL_ER, pgtbl_err); 1359 POSTING_READ(PGTBL_ER); 1360 } 1361 } 1362 1363 if (!IS_GEN2(dev)) { 1364 if (eir & I915_ERROR_PAGE_TABLE) { 1365 u32 pgtbl_err = I915_READ(PGTBL_ER); 1366 pr_err("page table error\n"); 1367 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1368 I915_WRITE(PGTBL_ER, pgtbl_err); 1369 POSTING_READ(PGTBL_ER); 1370 } 1371 } 1372 1373 if (eir & I915_ERROR_MEMORY_REFRESH) { 1374 pr_err("memory refresh error:\n"); 1375 for_each_pipe(pipe) 1376 pr_err("pipe %c stat: 0x%08x\n", 1377 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 1378 /* pipestat has already been acked */ 1379 } 1380 if (eir & I915_ERROR_INSTRUCTION) { 1381 pr_err("instruction error\n"); 1382 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1383 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1384 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1385 if (INTEL_INFO(dev)->gen < 4) { 1386 u32 ipeir = I915_READ(IPEIR); 1387 1388 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1389 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1390 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 1391 I915_WRITE(IPEIR, ipeir); 1392 POSTING_READ(IPEIR); 1393 } else { 1394 u32 ipeir = I915_READ(IPEIR_I965); 1395 1396 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1397 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1398 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1399 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1400 I915_WRITE(IPEIR_I965, ipeir); 1401 POSTING_READ(IPEIR_I965); 1402 } 1403 } 1404 1405 I915_WRITE(EIR, eir); 1406 POSTING_READ(EIR); 1407 eir = I915_READ(EIR); 1408 if (eir) { 1409 /* 1410 * some errors might have become stuck, 1411 * mask them. 1412 */ 1413 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 1414 I915_WRITE(EMR, I915_READ(EMR) | eir); 1415 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 1416 } 1417 } 1418 1419 /** 1420 * i915_handle_error - handle an error interrupt 1421 * @dev: drm device 1422 * 1423 * Do some basic checking of regsiter state at error interrupt time and 1424 * dump it to the syslog. Also call i915_capture_error_state() to make 1425 * sure we get a record and make it available in debugfs. Fire a uevent 1426 * so userspace knows something bad happened (should trigger collection 1427 * of a ring dump etc.). 1428 */ 1429 void i915_handle_error(struct drm_device *dev, bool wedged) 1430 { 1431 struct drm_i915_private *dev_priv = dev->dev_private; 1432 struct intel_ring_buffer *ring; 1433 int i; 1434 1435 i915_capture_error_state(dev); 1436 i915_report_and_clear_eir(dev); 1437 1438 if (wedged) { 1439 INIT_COMPLETION(dev_priv->error_completion); 1440 atomic_set(&dev_priv->mm.wedged, 1); 1441 1442 /* 1443 * Wakeup waiting processes so they don't hang 1444 */ 1445 for_each_ring(ring, dev_priv, i) 1446 wake_up_all(&ring->irq_queue); 1447 } 1448 1449 queue_work(dev_priv->wq, &dev_priv->error_work); 1450 } 1451 1452 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1453 { 1454 drm_i915_private_t *dev_priv = dev->dev_private; 1455 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1456 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1457 struct drm_i915_gem_object *obj; 1458 struct intel_unpin_work *work; 1459 unsigned long flags; 1460 bool stall_detected; 1461 1462 /* Ignore early vblank irqs */ 1463 if (intel_crtc == NULL) 1464 return; 1465 1466 spin_lock_irqsave(&dev->event_lock, flags); 1467 work = intel_crtc->unpin_work; 1468 1469 if (work == NULL || 1470 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1471 !work->enable_stall_check) { 1472 /* Either the pending flip IRQ arrived, or we're too early. Don't check */ 1473 spin_unlock_irqrestore(&dev->event_lock, flags); 1474 return; 1475 } 1476 1477 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 1478 obj = work->pending_flip_obj; 1479 if (INTEL_INFO(dev)->gen >= 4) { 1480 int dspsurf = DSPSURF(intel_crtc->plane); 1481 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1482 obj->gtt_offset; 1483 } else { 1484 int dspaddr = DSPADDR(intel_crtc->plane); 1485 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1486 crtc->y * crtc->fb->pitches[0] + 1487 crtc->x * crtc->fb->bits_per_pixel/8); 1488 } 1489 1490 spin_unlock_irqrestore(&dev->event_lock, flags); 1491 1492 if (stall_detected) { 1493 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 1494 intel_prepare_page_flip(dev, intel_crtc->plane); 1495 } 1496 } 1497 1498 /* Called from drm generic code, passed 'crtc' which 1499 * we use as a pipe index 1500 */ 1501 static int i915_enable_vblank(struct drm_device *dev, int pipe) 1502 { 1503 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1504 unsigned long irqflags; 1505 1506 if (!i915_pipe_enabled(dev, pipe)) 1507 return -EINVAL; 1508 1509 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1510 if (INTEL_INFO(dev)->gen >= 4) 1511 i915_enable_pipestat(dev_priv, pipe, 1512 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1513 else 1514 i915_enable_pipestat(dev_priv, pipe, 1515 PIPE_VBLANK_INTERRUPT_ENABLE); 1516 1517 /* maintain vblank delivery even in deep C-states */ 1518 if (dev_priv->info->gen == 3) 1519 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 1520 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1521 1522 return 0; 1523 } 1524 1525 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1526 { 1527 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1528 unsigned long irqflags; 1529 1530 if (!i915_pipe_enabled(dev, pipe)) 1531 return -EINVAL; 1532 1533 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1534 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1535 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1536 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1537 1538 return 0; 1539 } 1540 1541 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) 1542 { 1543 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1544 unsigned long irqflags; 1545 1546 if (!i915_pipe_enabled(dev, pipe)) 1547 return -EINVAL; 1548 1549 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1550 ironlake_enable_display_irq(dev_priv, 1551 DE_PIPEA_VBLANK_IVB << (5 * pipe)); 1552 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1553 1554 return 0; 1555 } 1556 1557 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 1558 { 1559 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1560 unsigned long irqflags; 1561 u32 imr; 1562 1563 if (!i915_pipe_enabled(dev, pipe)) 1564 return -EINVAL; 1565 1566 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1567 imr = I915_READ(VLV_IMR); 1568 if (pipe == 0) 1569 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1570 else 1571 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1572 I915_WRITE(VLV_IMR, imr); 1573 i915_enable_pipestat(dev_priv, pipe, 1574 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1575 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1576 1577 return 0; 1578 } 1579 1580 /* Called from drm generic code, passed 'crtc' which 1581 * we use as a pipe index 1582 */ 1583 static void i915_disable_vblank(struct drm_device *dev, int pipe) 1584 { 1585 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1586 unsigned long irqflags; 1587 1588 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1589 if (dev_priv->info->gen == 3) 1590 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 1591 1592 i915_disable_pipestat(dev_priv, pipe, 1593 PIPE_VBLANK_INTERRUPT_ENABLE | 1594 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1595 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1596 } 1597 1598 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1599 { 1600 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1601 unsigned long irqflags; 1602 1603 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1604 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1605 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1606 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1607 } 1608 1609 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) 1610 { 1611 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1612 unsigned long irqflags; 1613 1614 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1615 ironlake_disable_display_irq(dev_priv, 1616 DE_PIPEA_VBLANK_IVB << (pipe * 5)); 1617 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1618 } 1619 1620 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 1621 { 1622 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1623 unsigned long irqflags; 1624 u32 imr; 1625 1626 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1627 i915_disable_pipestat(dev_priv, pipe, 1628 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1629 imr = I915_READ(VLV_IMR); 1630 if (pipe == 0) 1631 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1632 else 1633 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1634 I915_WRITE(VLV_IMR, imr); 1635 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1636 } 1637 1638 static u32 1639 ring_last_seqno(struct intel_ring_buffer *ring) 1640 { 1641 return list_entry(ring->request_list.prev, 1642 struct drm_i915_gem_request, list)->seqno; 1643 } 1644 1645 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1646 { 1647 if (list_empty(&ring->request_list) || 1648 i915_seqno_passed(ring->get_seqno(ring, false), 1649 ring_last_seqno(ring))) { 1650 /* Issue a wake-up to catch stuck h/w. */ 1651 if (waitqueue_active(&ring->irq_queue)) { 1652 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 1653 ring->name); 1654 wake_up_all(&ring->irq_queue); 1655 *err = true; 1656 } 1657 return true; 1658 } 1659 return false; 1660 } 1661 1662 static bool kick_ring(struct intel_ring_buffer *ring) 1663 { 1664 struct drm_device *dev = ring->dev; 1665 struct drm_i915_private *dev_priv = dev->dev_private; 1666 u32 tmp = I915_READ_CTL(ring); 1667 if (tmp & RING_WAIT) { 1668 DRM_ERROR("Kicking stuck wait on %s\n", 1669 ring->name); 1670 I915_WRITE_CTL(ring, tmp); 1671 return true; 1672 } 1673 return false; 1674 } 1675 1676 static bool i915_hangcheck_hung(struct drm_device *dev) 1677 { 1678 drm_i915_private_t *dev_priv = dev->dev_private; 1679 1680 if (dev_priv->hangcheck_count++ > 1) { 1681 bool hung = true; 1682 1683 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1684 i915_handle_error(dev, true); 1685 1686 if (!IS_GEN2(dev)) { 1687 struct intel_ring_buffer *ring; 1688 int i; 1689 1690 /* Is the chip hanging on a WAIT_FOR_EVENT? 1691 * If so we can simply poke the RB_WAIT bit 1692 * and break the hang. This should work on 1693 * all but the second generation chipsets. 1694 */ 1695 for_each_ring(ring, dev_priv, i) 1696 hung &= !kick_ring(ring); 1697 } 1698 1699 return hung; 1700 } 1701 1702 return false; 1703 } 1704 1705 /** 1706 * This is called when the chip hasn't reported back with completed 1707 * batchbuffers in a long time. The first time this is called we simply record 1708 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses 1709 * again, we assume the chip is wedged and try to fix it. 1710 */ 1711 void i915_hangcheck_elapsed(unsigned long data) 1712 { 1713 struct drm_device *dev = (struct drm_device *)data; 1714 drm_i915_private_t *dev_priv = dev->dev_private; 1715 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; 1716 struct intel_ring_buffer *ring; 1717 bool err = false, idle; 1718 int i; 1719 1720 if (!i915_enable_hangcheck) 1721 return; 1722 1723 memset(acthd, 0, sizeof(acthd)); 1724 idle = true; 1725 for_each_ring(ring, dev_priv, i) { 1726 idle &= i915_hangcheck_ring_idle(ring, &err); 1727 acthd[i] = intel_ring_get_active_head(ring); 1728 } 1729 1730 /* If all work is done then ACTHD clearly hasn't advanced. */ 1731 if (idle) { 1732 if (err) { 1733 if (i915_hangcheck_hung(dev)) 1734 return; 1735 1736 goto repeat; 1737 } 1738 1739 dev_priv->hangcheck_count = 0; 1740 return; 1741 } 1742 1743 i915_get_extra_instdone(dev, instdone); 1744 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && 1745 memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { 1746 if (i915_hangcheck_hung(dev)) 1747 return; 1748 } else { 1749 dev_priv->hangcheck_count = 0; 1750 1751 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); 1752 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); 1753 } 1754 1755 repeat: 1756 /* Reset timer case chip hangs without another request being added */ 1757 mod_timer(&dev_priv->hangcheck_timer, 1758 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1759 } 1760 1761 /* drm_dma.h hooks 1762 */ 1763 static void ironlake_irq_preinstall(struct drm_device *dev) 1764 { 1765 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1766 1767 atomic_set(&dev_priv->irq_received, 0); 1768 1769 I915_WRITE(HWSTAM, 0xeffe); 1770 1771 /* XXX hotplug from PCH */ 1772 1773 I915_WRITE(DEIMR, 0xffffffff); 1774 I915_WRITE(DEIER, 0x0); 1775 POSTING_READ(DEIER); 1776 1777 /* and GT */ 1778 I915_WRITE(GTIMR, 0xffffffff); 1779 I915_WRITE(GTIER, 0x0); 1780 POSTING_READ(GTIER); 1781 1782 /* south display irq */ 1783 I915_WRITE(SDEIMR, 0xffffffff); 1784 I915_WRITE(SDEIER, 0x0); 1785 POSTING_READ(SDEIER); 1786 } 1787 1788 static void valleyview_irq_preinstall(struct drm_device *dev) 1789 { 1790 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1791 int pipe; 1792 1793 atomic_set(&dev_priv->irq_received, 0); 1794 1795 /* VLV magic */ 1796 I915_WRITE(VLV_IMR, 0); 1797 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 1798 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 1799 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 1800 1801 /* and GT */ 1802 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1803 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1804 I915_WRITE(GTIMR, 0xffffffff); 1805 I915_WRITE(GTIER, 0x0); 1806 POSTING_READ(GTIER); 1807 1808 I915_WRITE(DPINVGTT, 0xff); 1809 1810 I915_WRITE(PORT_HOTPLUG_EN, 0); 1811 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1812 for_each_pipe(pipe) 1813 I915_WRITE(PIPESTAT(pipe), 0xffff); 1814 I915_WRITE(VLV_IIR, 0xffffffff); 1815 I915_WRITE(VLV_IMR, 0xffffffff); 1816 I915_WRITE(VLV_IER, 0x0); 1817 POSTING_READ(VLV_IER); 1818 } 1819 1820 /* 1821 * Enable digital hotplug on the PCH, and configure the DP short pulse 1822 * duration to 2ms (which is the minimum in the Display Port spec) 1823 * 1824 * This register is the same on all known PCH chips. 1825 */ 1826 1827 static void ironlake_enable_pch_hotplug(struct drm_device *dev) 1828 { 1829 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1830 u32 hotplug; 1831 1832 hotplug = I915_READ(PCH_PORT_HOTPLUG); 1833 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 1834 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 1835 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 1836 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 1837 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 1838 } 1839 1840 static int ironlake_irq_postinstall(struct drm_device *dev) 1841 { 1842 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1843 /* enable kind of interrupts always enabled */ 1844 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1845 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1846 u32 render_irqs; 1847 u32 hotplug_mask; 1848 1849 dev_priv->irq_mask = ~display_mask; 1850 1851 /* should always can generate irq */ 1852 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1853 I915_WRITE(DEIMR, dev_priv->irq_mask); 1854 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); 1855 POSTING_READ(DEIER); 1856 1857 dev_priv->gt_irq_mask = ~0; 1858 1859 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1860 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1861 1862 if (IS_GEN6(dev)) 1863 render_irqs = 1864 GT_USER_INTERRUPT | 1865 GEN6_BSD_USER_INTERRUPT | 1866 GEN6_BLITTER_USER_INTERRUPT; 1867 else 1868 render_irqs = 1869 GT_USER_INTERRUPT | 1870 GT_PIPE_NOTIFY | 1871 GT_BSD_USER_INTERRUPT; 1872 I915_WRITE(GTIER, render_irqs); 1873 POSTING_READ(GTIER); 1874 1875 if (HAS_PCH_CPT(dev)) { 1876 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1877 SDE_PORTB_HOTPLUG_CPT | 1878 SDE_PORTC_HOTPLUG_CPT | 1879 SDE_PORTD_HOTPLUG_CPT); 1880 } else { 1881 hotplug_mask = (SDE_CRT_HOTPLUG | 1882 SDE_PORTB_HOTPLUG | 1883 SDE_PORTC_HOTPLUG | 1884 SDE_PORTD_HOTPLUG | 1885 SDE_AUX_MASK); 1886 } 1887 1888 dev_priv->pch_irq_mask = ~hotplug_mask; 1889 1890 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1891 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1892 I915_WRITE(SDEIER, hotplug_mask); 1893 POSTING_READ(SDEIER); 1894 1895 ironlake_enable_pch_hotplug(dev); 1896 1897 if (IS_IRONLAKE_M(dev)) { 1898 /* Clear & enable PCU event interrupts */ 1899 I915_WRITE(DEIIR, DE_PCU_EVENT); 1900 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); 1901 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 1902 } 1903 1904 return 0; 1905 } 1906 1907 static int ivybridge_irq_postinstall(struct drm_device *dev) 1908 { 1909 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1910 /* enable kind of interrupts always enabled */ 1911 u32 display_mask = 1912 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 1913 DE_PLANEC_FLIP_DONE_IVB | 1914 DE_PLANEB_FLIP_DONE_IVB | 1915 DE_PLANEA_FLIP_DONE_IVB; 1916 u32 render_irqs; 1917 u32 hotplug_mask; 1918 1919 dev_priv->irq_mask = ~display_mask; 1920 1921 /* should always can generate irq */ 1922 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1923 I915_WRITE(DEIMR, dev_priv->irq_mask); 1924 I915_WRITE(DEIER, 1925 display_mask | 1926 DE_PIPEC_VBLANK_IVB | 1927 DE_PIPEB_VBLANK_IVB | 1928 DE_PIPEA_VBLANK_IVB); 1929 POSTING_READ(DEIER); 1930 1931 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 1932 1933 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1934 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1935 1936 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 1937 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 1938 I915_WRITE(GTIER, render_irqs); 1939 POSTING_READ(GTIER); 1940 1941 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1942 SDE_PORTB_HOTPLUG_CPT | 1943 SDE_PORTC_HOTPLUG_CPT | 1944 SDE_PORTD_HOTPLUG_CPT); 1945 dev_priv->pch_irq_mask = ~hotplug_mask; 1946 1947 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1948 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1949 I915_WRITE(SDEIER, hotplug_mask); 1950 POSTING_READ(SDEIER); 1951 1952 ironlake_enable_pch_hotplug(dev); 1953 1954 return 0; 1955 } 1956 1957 static int valleyview_irq_postinstall(struct drm_device *dev) 1958 { 1959 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1960 u32 enable_mask; 1961 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1962 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 1963 u32 render_irqs; 1964 u16 msid; 1965 1966 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 1967 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 1968 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 1969 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 1970 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1971 1972 /* 1973 *Leave vblank interrupts masked initially. enable/disable will 1974 * toggle them based on usage. 1975 */ 1976 dev_priv->irq_mask = (~enable_mask) | 1977 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 1978 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1979 1980 dev_priv->pipestat[0] = 0; 1981 dev_priv->pipestat[1] = 0; 1982 1983 /* Hack for broken MSIs on VLV */ 1984 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); 1985 pci_read_config_word(dev->pdev, 0x98, &msid); 1986 msid &= 0xff; /* mask out delivery bits */ 1987 msid |= (1<<14); 1988 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); 1989 1990 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 1991 I915_WRITE(VLV_IER, enable_mask); 1992 I915_WRITE(VLV_IIR, 0xffffffff); 1993 I915_WRITE(PIPESTAT(0), 0xffff); 1994 I915_WRITE(PIPESTAT(1), 0xffff); 1995 POSTING_READ(VLV_IER); 1996 1997 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 1998 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 1999 2000 I915_WRITE(VLV_IIR, 0xffffffff); 2001 I915_WRITE(VLV_IIR, 0xffffffff); 2002 2003 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2004 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2005 2006 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 2007 GEN6_BLITTER_USER_INTERRUPT; 2008 I915_WRITE(GTIER, render_irqs); 2009 POSTING_READ(GTIER); 2010 2011 /* ack & enable invalid PTE error interrupts */ 2012 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2013 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2014 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2015 #endif 2016 2017 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2018 /* Note HDMI and DP share bits */ 2019 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2020 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2021 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2022 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2023 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2024 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2025 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2026 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2027 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2028 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2029 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2030 hotplug_en |= CRT_HOTPLUG_INT_EN; 2031 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2032 } 2033 2034 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2035 2036 return 0; 2037 } 2038 2039 static void valleyview_irq_uninstall(struct drm_device *dev) 2040 { 2041 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2042 int pipe; 2043 2044 if (!dev_priv) 2045 return; 2046 2047 for_each_pipe(pipe) 2048 I915_WRITE(PIPESTAT(pipe), 0xffff); 2049 2050 I915_WRITE(HWSTAM, 0xffffffff); 2051 I915_WRITE(PORT_HOTPLUG_EN, 0); 2052 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2053 for_each_pipe(pipe) 2054 I915_WRITE(PIPESTAT(pipe), 0xffff); 2055 I915_WRITE(VLV_IIR, 0xffffffff); 2056 I915_WRITE(VLV_IMR, 0xffffffff); 2057 I915_WRITE(VLV_IER, 0x0); 2058 POSTING_READ(VLV_IER); 2059 } 2060 2061 static void ironlake_irq_uninstall(struct drm_device *dev) 2062 { 2063 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2064 2065 if (!dev_priv) 2066 return; 2067 2068 I915_WRITE(HWSTAM, 0xffffffff); 2069 2070 I915_WRITE(DEIMR, 0xffffffff); 2071 I915_WRITE(DEIER, 0x0); 2072 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2073 2074 I915_WRITE(GTIMR, 0xffffffff); 2075 I915_WRITE(GTIER, 0x0); 2076 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2077 2078 I915_WRITE(SDEIMR, 0xffffffff); 2079 I915_WRITE(SDEIER, 0x0); 2080 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2081 } 2082 2083 static void i8xx_irq_preinstall(struct drm_device * dev) 2084 { 2085 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2086 int pipe; 2087 2088 atomic_set(&dev_priv->irq_received, 0); 2089 2090 for_each_pipe(pipe) 2091 I915_WRITE(PIPESTAT(pipe), 0); 2092 I915_WRITE16(IMR, 0xffff); 2093 I915_WRITE16(IER, 0x0); 2094 POSTING_READ16(IER); 2095 } 2096 2097 static int i8xx_irq_postinstall(struct drm_device *dev) 2098 { 2099 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2100 2101 dev_priv->pipestat[0] = 0; 2102 dev_priv->pipestat[1] = 0; 2103 2104 I915_WRITE16(EMR, 2105 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2106 2107 /* Unmask the interrupts that we always want on. */ 2108 dev_priv->irq_mask = 2109 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2110 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2111 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2112 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2113 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2114 I915_WRITE16(IMR, dev_priv->irq_mask); 2115 2116 I915_WRITE16(IER, 2117 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2118 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2119 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2120 I915_USER_INTERRUPT); 2121 POSTING_READ16(IER); 2122 2123 return 0; 2124 } 2125 2126 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2127 { 2128 struct drm_device *dev = (struct drm_device *) arg; 2129 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2130 u16 iir, new_iir; 2131 u32 pipe_stats[2]; 2132 unsigned long irqflags; 2133 int irq_received; 2134 int pipe; 2135 u16 flip_mask = 2136 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2137 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2138 2139 atomic_inc(&dev_priv->irq_received); 2140 2141 iir = I915_READ16(IIR); 2142 if (iir == 0) 2143 return IRQ_NONE; 2144 2145 while (iir & ~flip_mask) { 2146 /* Can't rely on pipestat interrupt bit in iir as it might 2147 * have been cleared after the pipestat interrupt was received. 2148 * It doesn't set the bit in iir again, but it still produces 2149 * interrupts (for non-MSI). 2150 */ 2151 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2152 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2153 i915_handle_error(dev, false); 2154 2155 for_each_pipe(pipe) { 2156 int reg = PIPESTAT(pipe); 2157 pipe_stats[pipe] = I915_READ(reg); 2158 2159 /* 2160 * Clear the PIPE*STAT regs before the IIR 2161 */ 2162 if (pipe_stats[pipe] & 0x8000ffff) { 2163 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2164 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2165 pipe_name(pipe)); 2166 I915_WRITE(reg, pipe_stats[pipe]); 2167 irq_received = 1; 2168 } 2169 } 2170 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2171 2172 I915_WRITE16(IIR, iir & ~flip_mask); 2173 new_iir = I915_READ16(IIR); /* Flush posted writes */ 2174 2175 i915_update_dri1_breadcrumb(dev); 2176 2177 if (iir & I915_USER_INTERRUPT) 2178 notify_ring(dev, &dev_priv->ring[RCS]); 2179 2180 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 2181 drm_handle_vblank(dev, 0)) { 2182 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 2183 intel_prepare_page_flip(dev, 0); 2184 intel_finish_page_flip(dev, 0); 2185 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; 2186 } 2187 } 2188 2189 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 2190 drm_handle_vblank(dev, 1)) { 2191 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { 2192 intel_prepare_page_flip(dev, 1); 2193 intel_finish_page_flip(dev, 1); 2194 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2195 } 2196 } 2197 2198 iir = new_iir; 2199 } 2200 2201 return IRQ_HANDLED; 2202 } 2203 2204 static void i8xx_irq_uninstall(struct drm_device * dev) 2205 { 2206 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2207 int pipe; 2208 2209 for_each_pipe(pipe) { 2210 /* Clear enable bits; then clear status bits */ 2211 I915_WRITE(PIPESTAT(pipe), 0); 2212 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2213 } 2214 I915_WRITE16(IMR, 0xffff); 2215 I915_WRITE16(IER, 0x0); 2216 I915_WRITE16(IIR, I915_READ16(IIR)); 2217 } 2218 2219 static void i915_irq_preinstall(struct drm_device * dev) 2220 { 2221 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2222 int pipe; 2223 2224 atomic_set(&dev_priv->irq_received, 0); 2225 2226 if (I915_HAS_HOTPLUG(dev)) { 2227 I915_WRITE(PORT_HOTPLUG_EN, 0); 2228 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2229 } 2230 2231 I915_WRITE16(HWSTAM, 0xeffe); 2232 for_each_pipe(pipe) 2233 I915_WRITE(PIPESTAT(pipe), 0); 2234 I915_WRITE(IMR, 0xffffffff); 2235 I915_WRITE(IER, 0x0); 2236 POSTING_READ(IER); 2237 } 2238 2239 static int i915_irq_postinstall(struct drm_device *dev) 2240 { 2241 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2242 u32 enable_mask; 2243 2244 dev_priv->pipestat[0] = 0; 2245 dev_priv->pipestat[1] = 0; 2246 2247 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2248 2249 /* Unmask the interrupts that we always want on. */ 2250 dev_priv->irq_mask = 2251 ~(I915_ASLE_INTERRUPT | 2252 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2253 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2254 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2255 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2256 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2257 2258 enable_mask = 2259 I915_ASLE_INTERRUPT | 2260 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2261 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2262 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2263 I915_USER_INTERRUPT; 2264 2265 if (I915_HAS_HOTPLUG(dev)) { 2266 /* Enable in IER... */ 2267 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2268 /* and unmask in IMR */ 2269 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2270 } 2271 2272 I915_WRITE(IMR, dev_priv->irq_mask); 2273 I915_WRITE(IER, enable_mask); 2274 POSTING_READ(IER); 2275 2276 if (I915_HAS_HOTPLUG(dev)) { 2277 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2278 2279 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2280 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2281 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2282 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2283 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2284 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2285 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2286 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2287 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2288 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2289 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2290 hotplug_en |= CRT_HOTPLUG_INT_EN; 2291 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2292 } 2293 2294 /* Ignore TV since it's buggy */ 2295 2296 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2297 } 2298 2299 intel_opregion_enable_asle(dev); 2300 2301 return 0; 2302 } 2303 2304 static irqreturn_t i915_irq_handler(int irq, void *arg) 2305 { 2306 struct drm_device *dev = (struct drm_device *) arg; 2307 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2308 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2309 unsigned long irqflags; 2310 u32 flip_mask = 2311 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2312 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2313 u32 flip[2] = { 2314 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, 2315 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT 2316 }; 2317 int pipe, ret = IRQ_NONE; 2318 2319 atomic_inc(&dev_priv->irq_received); 2320 2321 iir = I915_READ(IIR); 2322 do { 2323 bool irq_received = (iir & ~flip_mask) != 0; 2324 bool blc_event = false; 2325 2326 /* Can't rely on pipestat interrupt bit in iir as it might 2327 * have been cleared after the pipestat interrupt was received. 2328 * It doesn't set the bit in iir again, but it still produces 2329 * interrupts (for non-MSI). 2330 */ 2331 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2332 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2333 i915_handle_error(dev, false); 2334 2335 for_each_pipe(pipe) { 2336 int reg = PIPESTAT(pipe); 2337 pipe_stats[pipe] = I915_READ(reg); 2338 2339 /* Clear the PIPE*STAT regs before the IIR */ 2340 if (pipe_stats[pipe] & 0x8000ffff) { 2341 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2342 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2343 pipe_name(pipe)); 2344 I915_WRITE(reg, pipe_stats[pipe]); 2345 irq_received = true; 2346 } 2347 } 2348 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2349 2350 if (!irq_received) 2351 break; 2352 2353 /* Consume port. Then clear IIR or we'll miss events */ 2354 if ((I915_HAS_HOTPLUG(dev)) && 2355 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2356 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2357 2358 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2359 hotplug_status); 2360 if (hotplug_status & dev_priv->hotplug_supported_mask) 2361 queue_work(dev_priv->wq, 2362 &dev_priv->hotplug_work); 2363 2364 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2365 POSTING_READ(PORT_HOTPLUG_STAT); 2366 } 2367 2368 I915_WRITE(IIR, iir & ~flip_mask); 2369 new_iir = I915_READ(IIR); /* Flush posted writes */ 2370 2371 if (iir & I915_USER_INTERRUPT) 2372 notify_ring(dev, &dev_priv->ring[RCS]); 2373 2374 for_each_pipe(pipe) { 2375 int plane = pipe; 2376 if (IS_MOBILE(dev)) 2377 plane = !plane; 2378 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 2379 drm_handle_vblank(dev, pipe)) { 2380 if (iir & flip[plane]) { 2381 intel_prepare_page_flip(dev, plane); 2382 intel_finish_page_flip(dev, pipe); 2383 flip_mask &= ~flip[plane]; 2384 } 2385 } 2386 2387 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2388 blc_event = true; 2389 } 2390 2391 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2392 intel_opregion_asle_intr(dev); 2393 2394 /* With MSI, interrupts are only generated when iir 2395 * transitions from zero to nonzero. If another bit got 2396 * set while we were handling the existing iir bits, then 2397 * we would never get another interrupt. 2398 * 2399 * This is fine on non-MSI as well, as if we hit this path 2400 * we avoid exiting the interrupt handler only to generate 2401 * another one. 2402 * 2403 * Note that for MSI this could cause a stray interrupt report 2404 * if an interrupt landed in the time between writing IIR and 2405 * the posting read. This should be rare enough to never 2406 * trigger the 99% of 100,000 interrupts test for disabling 2407 * stray interrupts. 2408 */ 2409 ret = IRQ_HANDLED; 2410 iir = new_iir; 2411 } while (iir & ~flip_mask); 2412 2413 i915_update_dri1_breadcrumb(dev); 2414 2415 return ret; 2416 } 2417 2418 static void i915_irq_uninstall(struct drm_device * dev) 2419 { 2420 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2421 int pipe; 2422 2423 if (I915_HAS_HOTPLUG(dev)) { 2424 I915_WRITE(PORT_HOTPLUG_EN, 0); 2425 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2426 } 2427 2428 I915_WRITE16(HWSTAM, 0xffff); 2429 for_each_pipe(pipe) { 2430 /* Clear enable bits; then clear status bits */ 2431 I915_WRITE(PIPESTAT(pipe), 0); 2432 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2433 } 2434 I915_WRITE(IMR, 0xffffffff); 2435 I915_WRITE(IER, 0x0); 2436 2437 I915_WRITE(IIR, I915_READ(IIR)); 2438 } 2439 2440 static void i965_irq_preinstall(struct drm_device * dev) 2441 { 2442 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2443 int pipe; 2444 2445 atomic_set(&dev_priv->irq_received, 0); 2446 2447 I915_WRITE(PORT_HOTPLUG_EN, 0); 2448 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2449 2450 I915_WRITE(HWSTAM, 0xeffe); 2451 for_each_pipe(pipe) 2452 I915_WRITE(PIPESTAT(pipe), 0); 2453 I915_WRITE(IMR, 0xffffffff); 2454 I915_WRITE(IER, 0x0); 2455 POSTING_READ(IER); 2456 } 2457 2458 static int i965_irq_postinstall(struct drm_device *dev) 2459 { 2460 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2461 u32 hotplug_en; 2462 u32 enable_mask; 2463 u32 error_mask; 2464 2465 /* Unmask the interrupts that we always want on. */ 2466 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2467 I915_DISPLAY_PORT_INTERRUPT | 2468 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2469 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2470 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2471 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2472 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2473 2474 enable_mask = ~dev_priv->irq_mask; 2475 enable_mask |= I915_USER_INTERRUPT; 2476 2477 if (IS_G4X(dev)) 2478 enable_mask |= I915_BSD_USER_INTERRUPT; 2479 2480 dev_priv->pipestat[0] = 0; 2481 dev_priv->pipestat[1] = 0; 2482 2483 /* 2484 * Enable some error detection, note the instruction error mask 2485 * bit is reserved, so we leave it masked. 2486 */ 2487 if (IS_G4X(dev)) { 2488 error_mask = ~(GM45_ERROR_PAGE_TABLE | 2489 GM45_ERROR_MEM_PRIV | 2490 GM45_ERROR_CP_PRIV | 2491 I915_ERROR_MEMORY_REFRESH); 2492 } else { 2493 error_mask = ~(I915_ERROR_PAGE_TABLE | 2494 I915_ERROR_MEMORY_REFRESH); 2495 } 2496 I915_WRITE(EMR, error_mask); 2497 2498 I915_WRITE(IMR, dev_priv->irq_mask); 2499 I915_WRITE(IER, enable_mask); 2500 POSTING_READ(IER); 2501 2502 /* Note HDMI and DP share hotplug bits */ 2503 hotplug_en = 0; 2504 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2505 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2506 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2507 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2508 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2509 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2510 if (IS_G4X(dev)) { 2511 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) 2512 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2513 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) 2514 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2515 } else { 2516 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) 2517 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2518 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) 2519 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2520 } 2521 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2522 hotplug_en |= CRT_HOTPLUG_INT_EN; 2523 2524 /* Programming the CRT detection parameters tends 2525 to generate a spurious hotplug event about three 2526 seconds later. So just do it once. 2527 */ 2528 if (IS_G4X(dev)) 2529 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 2530 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2531 } 2532 2533 /* Ignore TV since it's buggy */ 2534 2535 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2536 2537 intel_opregion_enable_asle(dev); 2538 2539 return 0; 2540 } 2541 2542 static irqreturn_t i965_irq_handler(int irq, void *arg) 2543 { 2544 struct drm_device *dev = (struct drm_device *) arg; 2545 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2546 u32 iir, new_iir; 2547 u32 pipe_stats[I915_MAX_PIPES]; 2548 unsigned long irqflags; 2549 int irq_received; 2550 int ret = IRQ_NONE, pipe; 2551 2552 atomic_inc(&dev_priv->irq_received); 2553 2554 iir = I915_READ(IIR); 2555 2556 for (;;) { 2557 bool blc_event = false; 2558 2559 irq_received = iir != 0; 2560 2561 /* Can't rely on pipestat interrupt bit in iir as it might 2562 * have been cleared after the pipestat interrupt was received. 2563 * It doesn't set the bit in iir again, but it still produces 2564 * interrupts (for non-MSI). 2565 */ 2566 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2567 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2568 i915_handle_error(dev, false); 2569 2570 for_each_pipe(pipe) { 2571 int reg = PIPESTAT(pipe); 2572 pipe_stats[pipe] = I915_READ(reg); 2573 2574 /* 2575 * Clear the PIPE*STAT regs before the IIR 2576 */ 2577 if (pipe_stats[pipe] & 0x8000ffff) { 2578 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2579 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2580 pipe_name(pipe)); 2581 I915_WRITE(reg, pipe_stats[pipe]); 2582 irq_received = 1; 2583 } 2584 } 2585 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2586 2587 if (!irq_received) 2588 break; 2589 2590 ret = IRQ_HANDLED; 2591 2592 /* Consume port. Then clear IIR or we'll miss events */ 2593 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 2594 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2595 2596 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2597 hotplug_status); 2598 if (hotplug_status & dev_priv->hotplug_supported_mask) 2599 queue_work(dev_priv->wq, 2600 &dev_priv->hotplug_work); 2601 2602 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2603 I915_READ(PORT_HOTPLUG_STAT); 2604 } 2605 2606 I915_WRITE(IIR, iir); 2607 new_iir = I915_READ(IIR); /* Flush posted writes */ 2608 2609 if (iir & I915_USER_INTERRUPT) 2610 notify_ring(dev, &dev_priv->ring[RCS]); 2611 if (iir & I915_BSD_USER_INTERRUPT) 2612 notify_ring(dev, &dev_priv->ring[VCS]); 2613 2614 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 2615 intel_prepare_page_flip(dev, 0); 2616 2617 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) 2618 intel_prepare_page_flip(dev, 1); 2619 2620 for_each_pipe(pipe) { 2621 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 2622 drm_handle_vblank(dev, pipe)) { 2623 i915_pageflip_stall_check(dev, pipe); 2624 intel_finish_page_flip(dev, pipe); 2625 } 2626 2627 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2628 blc_event = true; 2629 } 2630 2631 2632 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2633 intel_opregion_asle_intr(dev); 2634 2635 /* With MSI, interrupts are only generated when iir 2636 * transitions from zero to nonzero. If another bit got 2637 * set while we were handling the existing iir bits, then 2638 * we would never get another interrupt. 2639 * 2640 * This is fine on non-MSI as well, as if we hit this path 2641 * we avoid exiting the interrupt handler only to generate 2642 * another one. 2643 * 2644 * Note that for MSI this could cause a stray interrupt report 2645 * if an interrupt landed in the time between writing IIR and 2646 * the posting read. This should be rare enough to never 2647 * trigger the 99% of 100,000 interrupts test for disabling 2648 * stray interrupts. 2649 */ 2650 iir = new_iir; 2651 } 2652 2653 i915_update_dri1_breadcrumb(dev); 2654 2655 return ret; 2656 } 2657 2658 static void i965_irq_uninstall(struct drm_device * dev) 2659 { 2660 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2661 int pipe; 2662 2663 if (!dev_priv) 2664 return; 2665 2666 I915_WRITE(PORT_HOTPLUG_EN, 0); 2667 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2668 2669 I915_WRITE(HWSTAM, 0xffffffff); 2670 for_each_pipe(pipe) 2671 I915_WRITE(PIPESTAT(pipe), 0); 2672 I915_WRITE(IMR, 0xffffffff); 2673 I915_WRITE(IER, 0x0); 2674 2675 for_each_pipe(pipe) 2676 I915_WRITE(PIPESTAT(pipe), 2677 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 2678 I915_WRITE(IIR, I915_READ(IIR)); 2679 } 2680 2681 void intel_irq_init(struct drm_device *dev) 2682 { 2683 struct drm_i915_private *dev_priv = dev->dev_private; 2684 2685 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2686 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2687 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2688 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 2689 2690 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2691 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2692 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 2693 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2694 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2695 } 2696 2697 if (drm_core_check_feature(dev, DRIVER_MODESET)) 2698 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 2699 else 2700 dev->driver->get_vblank_timestamp = NULL; 2701 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2702 2703 if (IS_VALLEYVIEW(dev)) { 2704 dev->driver->irq_handler = valleyview_irq_handler; 2705 dev->driver->irq_preinstall = valleyview_irq_preinstall; 2706 dev->driver->irq_postinstall = valleyview_irq_postinstall; 2707 dev->driver->irq_uninstall = valleyview_irq_uninstall; 2708 dev->driver->enable_vblank = valleyview_enable_vblank; 2709 dev->driver->disable_vblank = valleyview_disable_vblank; 2710 } else if (IS_IVYBRIDGE(dev)) { 2711 /* Share pre & uninstall handlers with ILK/SNB */ 2712 dev->driver->irq_handler = ivybridge_irq_handler; 2713 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2714 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 2715 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2716 dev->driver->enable_vblank = ivybridge_enable_vblank; 2717 dev->driver->disable_vblank = ivybridge_disable_vblank; 2718 } else if (IS_HASWELL(dev)) { 2719 /* Share interrupts handling with IVB */ 2720 dev->driver->irq_handler = ivybridge_irq_handler; 2721 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2722 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 2723 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2724 dev->driver->enable_vblank = ivybridge_enable_vblank; 2725 dev->driver->disable_vblank = ivybridge_disable_vblank; 2726 } else if (HAS_PCH_SPLIT(dev)) { 2727 dev->driver->irq_handler = ironlake_irq_handler; 2728 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2729 dev->driver->irq_postinstall = ironlake_irq_postinstall; 2730 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2731 dev->driver->enable_vblank = ironlake_enable_vblank; 2732 dev->driver->disable_vblank = ironlake_disable_vblank; 2733 } else { 2734 if (INTEL_INFO(dev)->gen == 2) { 2735 dev->driver->irq_preinstall = i8xx_irq_preinstall; 2736 dev->driver->irq_postinstall = i8xx_irq_postinstall; 2737 dev->driver->irq_handler = i8xx_irq_handler; 2738 dev->driver->irq_uninstall = i8xx_irq_uninstall; 2739 } else if (INTEL_INFO(dev)->gen == 3) { 2740 dev->driver->irq_preinstall = i915_irq_preinstall; 2741 dev->driver->irq_postinstall = i915_irq_postinstall; 2742 dev->driver->irq_uninstall = i915_irq_uninstall; 2743 dev->driver->irq_handler = i915_irq_handler; 2744 } else { 2745 dev->driver->irq_preinstall = i965_irq_preinstall; 2746 dev->driver->irq_postinstall = i965_irq_postinstall; 2747 dev->driver->irq_uninstall = i965_irq_uninstall; 2748 dev->driver->irq_handler = i965_irq_handler; 2749 } 2750 dev->driver->enable_vblank = i915_enable_vblank; 2751 dev->driver->disable_vblank = i915_disable_vblank; 2752 } 2753 } 2754