/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/intel_display_core.h"
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr_regs.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU, so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
                                 irqreturn_t res)
{
        if (unlikely(res != IRQ_HANDLED))
                return;

        /*
         * A clever compiler translates that into INC. A not so clever one
         * should at least prevent store tearing.
         */
        WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

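/*
 * Quiesce a GEN2-style interrupt register block: mask everything via
 * IMR, disable delivery via IER, then clear whatever is already latched
 * in IIR. The posting reads flush each write before the next step.
 */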
void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
        intel_uncore_write(uncore, regs.imr, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.imr);

        intel_uncore_write(uncore, regs.ier, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write(uncore, regs.iir, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.iir);
        intel_uncore_write(uncore, regs.iir, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.iir);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
        u32 val = intel_uncore_read(uncore, reg);

        if (val == 0)
                return;

        drm_WARN(&uncore->i915->drm, 1,
                 "Interrupt register 0x%x is not zero: 0x%08x\n",
                 i915_mmio_reg_offset(reg), val);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
}

void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
                   u32 imr_val, u32 ier_val)
{
        gen2_assert_iir_is_zero(uncore, regs.iir);

        intel_uncore_write(uncore, regs.ier, ier_val);
        intel_uncore_write(uncore, regs.imr, imr_val);
        intel_uncore_posting_read(uncore, regs.imr);
}

void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
{
        intel_uncore_write(uncore, regs.emr, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.emr);

        intel_uncore_write(uncore, regs.eir, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.eir);
        intel_uncore_write(uncore, regs.eir, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.eir);
}

void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
                     u32 emr_val)
{
        intel_uncore_write(uncore, regs.eir, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.eir);
        intel_uncore_write(uncore, regs.eir, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.eir);

        intel_uncore_write(uncore, regs.emr, emr_val);
        intel_uncore_posting_read(uncore, regs.emr);
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), l3_parity.error_work);
        struct intel_gt *gt = to_gt(dev_priv);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        u32 misccpctl;
        u8 slice = 0;

        /*
         * We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
                                     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
        intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                i915_reg_t reg;

                slice--;
                if (drm_WARN_ON_ONCE(&dev_priv->drm,
                                     slice >= NUM_L3_SLICES(dev_priv)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1 << slice);

                reg = GEN7_L3CDERRST1(slice);

                error_status = intel_uncore_read(&dev_priv->uncore, reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                intel_uncore_write(&dev_priv->uncore, reg,
                                   GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                intel_uncore_posting_read(&dev_priv->uncore, reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                drm_dbg(&dev_priv->drm,
                        "Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                        slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
        drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
        spin_lock_irq(gt->irq_lock);
        gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(gt->irq_lock);

        mutex_unlock(&dev_priv->drm.struct_mutex);
}

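/*
 * Top level VLV interrupt handler. Master and VLV_IER are pulled down
 * around the ack/handle sequence so that anything still pending at the
 * end produces a fresh 0->1 edge on the CPU interrupt line; see the
 * theory comment below.
 */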
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct intel_display *display = dev_priv->display;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 iir, gt_iir, pm_iir;
                u32 eir = 0, dpinvgtt = 0;
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 hotplug_status = 0;
                u32 ier = 0;

                gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
                pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
                iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        break;

                ret = IRQ_HANDLED;

                /*
                 * Theory on interrupt generation, based on empirical evidence:
                 *
                 * x = ((VLV_IIR & VLV_IER) ||
                 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
                 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
                 *
                 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
                 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
                 * guarantee the CPU interrupt will be raised again even if we
                 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
                 * bits this time around.
                 */
                intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
                ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

                if (gt_iir)
                        intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
                if (pm_iir)
                        intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(display);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

                /* Call regardless, as some status bits might not be
                 * signalled in IIR */
                i9xx_pipestat_irq_ack(display, iir, pipe_stats);

                if (iir & (I915_LPE_PIPE_A_INTERRUPT |
                           I915_LPE_PIPE_B_INTERRUPT))
                        intel_lpe_audio_irq_handler(display);

                /*
                 * VLV_IIR is single buffered, and reflects the level
                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
                 */
                if (iir)
                        intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

                intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
                intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

                if (gt_iir)
                        gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
                if (pm_iir)
                        gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(display, hotplug_status);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        vlv_display_error_irq_handler(display, eir, dpinvgtt);

                valleyview_pipestat_irq_handler(display, pipe_stats);
        } while (0);

        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}

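/*
 * CHV interrupt handler: same scheme as VLV above, except the master
 * control lives in GEN8_MASTER_IRQ and the GT is handled gen8 style.
 */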
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct intel_display *display = dev_priv->display;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 master_ctl, iir;
                u32 eir = 0, dpinvgtt = 0;
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 hotplug_status = 0;
                u32 ier = 0;

                master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

                if (master_ctl == 0 && iir == 0)
                        break;

                ret = IRQ_HANDLED;

                /*
                 * Theory on interrupt generation, based on empirical evidence:
                 *
                 * x = ((VLV_IIR & VLV_IER) ||
                 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
                 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
                 *
                 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
                 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
                 * guarantee the CPU interrupt will be raised again even if we
                 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
                 * bits this time around.
                 */
                intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
                ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

                gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(display);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

                /* Call regardless, as some status bits might not be
                 * signalled in IIR */
                i9xx_pipestat_irq_ack(display, iir, pipe_stats);

                if (iir & (I915_LPE_PIPE_A_INTERRUPT |
                           I915_LPE_PIPE_B_INTERRUPT |
                           I915_LPE_PIPE_C_INTERRUPT))
                        intel_lpe_audio_irq_handler(display);

                /*
                 * VLV_IIR is single buffered, and reflects the level
                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
                 */
                if (iir)
                        intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

                intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
                intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(display, hotplug_status);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        vlv_display_error_irq_handler(display, eir, dpinvgtt);

                valleyview_pipestat_irq_handler(display, pipe_stats);
        } while (0);

        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_display *display = i915->display;
        void __iomem * const regs = intel_uncore_regs(&i915->uncore);
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;

        if (unlikely(!intel_irqs_enabled(i915)))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&i915->runtime_pm);

        /* disable master interrupt before clearing iir */
        de_ier = raw_reg_read(regs, DEIER);
        raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be able
         * to process them after we restore SDEIER (as soon as we restore it,
         * we'll get an interrupt if SDEIIR still has something to process due
         * to its back queue).
         */
        if (!HAS_PCH_NOP(display)) {
                sde_ier = raw_reg_read(regs, SDEIER);
                raw_reg_write(regs, SDEIER, 0);
        }

        /* Find, clear, then process each source of interrupt */

        gt_iir = raw_reg_read(regs, GTIIR);
        if (gt_iir) {
                raw_reg_write(regs, GTIIR, gt_iir);
                if (GRAPHICS_VER(i915) >= 6)
                        gen6_gt_irq_handler(to_gt(i915), gt_iir);
                else
                        gen5_gt_irq_handler(to_gt(i915), gt_iir);
                ret = IRQ_HANDLED;
        }

        de_iir = raw_reg_read(regs, DEIIR);
        if (de_iir) {
                raw_reg_write(regs, DEIIR, de_iir);
                if (DISPLAY_VER(display) >= 7)
                        ivb_display_irq_handler(display, de_iir);
                else
                        ilk_display_irq_handler(display, de_iir);
                ret = IRQ_HANDLED;
        }

        if (GRAPHICS_VER(i915) >= 6) {
                u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);

                if (pm_iir) {
                        raw_reg_write(regs, GEN6_PMIIR, pm_iir);
                        gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
                        ret = IRQ_HANDLED;
                }
        }

        raw_reg_write(regs, DEIER, de_ier);
        if (sde_ier)
                raw_reg_write(regs, SDEIER, sde_ier);

        pmu_irq_stats(i915, ret);

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        enable_rpm_wakeref_asserts(&i915->runtime_pm);

        return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
        raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

        /*
         * Now with master disabled, get a sample of level indications
         * for this interrupt. Indications will be cleared on related acks.
         * New indications can and will light up during processing,
         * and will generate new interrupt after enabling master.
         */
        return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
        raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

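/*
 * GEN8 top level handler: disable the master control, sample the level
 * indications, find/queue/clear each source, then re-enable the master
 * so anything still pending raises a new interrupt.
 */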
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct intel_display *display = dev_priv->display;
        void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
        u32 master_ctl;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        master_ctl = gen8_master_intr_disable(regs);
        if (!master_ctl) {
                gen8_master_intr_enable(regs);
                return IRQ_NONE;
        }

        /* Find, queue (onto bottom-halves), then clear each source */
        gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        if (master_ctl & ~GEN8_GT_IRQS) {
                disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
                gen8_de_irq_handler(display, master_ctl);
                enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
        }

        gen8_master_intr_enable(regs);

        pmu_irq_stats(dev_priv, IRQ_HANDLED);

        return IRQ_HANDLED;
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

        /*
         * Now with master disabled, get a sample of level indications
         * for this interrupt. Indications will be cleared on related acks.
         * New indications can and will light up during processing,
         * and will generate new interrupt after enabling master.
         */
        return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_display *display = i915->display;
        void __iomem * const regs = intel_uncore_regs(&i915->uncore);
        struct intel_gt *gt = to_gt(i915);
        u32 master_ctl;
        u32 gu_misc_iir;

        if (!intel_irqs_enabled(i915))
                return IRQ_NONE;

        master_ctl = gen11_master_intr_disable(regs);
        if (!master_ctl) {
                gen11_master_intr_enable(regs);
                return IRQ_NONE;
        }

        /* Find, queue (onto bottom-halves), then clear each source */
        gen11_gt_irq_handler(gt, master_ctl);

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        if (master_ctl & GEN11_DISPLAY_IRQ)
                gen11_display_irq_handler(display);

        gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

        gen11_master_intr_enable(regs);

        gen11_gu_misc_irq_handler(display, gu_misc_iir);

        pmu_irq_stats(i915, IRQ_HANDLED);

        return IRQ_HANDLED;
}

static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
        u32 val;

        /* First disable interrupts */
        raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

        /* Get the indication levels and ack the master unit */
        val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
        if (unlikely(!val))
                return 0;

        raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

        return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
        raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}

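/*
 * DG1 adds a tile level master register (DG1_MSTR_TILE_INTR) on top of
 * the gen11 scheme. Only tile 0 is handled; anything else is reported
 * and dropped.
 */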
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
        struct drm_i915_private * const i915 = arg;
        struct intel_display *display = i915->display;
        struct intel_gt *gt = to_gt(i915);
        void __iomem * const regs = intel_uncore_regs(gt->uncore);
        u32 master_tile_ctl, master_ctl;
        u32 gu_misc_iir;

        if (!intel_irqs_enabled(i915))
                return IRQ_NONE;

        master_tile_ctl = dg1_master_intr_disable(regs);
        if (!master_tile_ctl) {
                dg1_master_intr_enable(regs);
                return IRQ_NONE;
        }

        /* FIXME: we only support tile 0 for now. */
        if (master_tile_ctl & DG1_MSTR_TILE(0)) {
                master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
                raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
        } else {
                drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
                        master_tile_ctl);
                dg1_master_intr_enable(regs);
                return IRQ_NONE;
        }

        gen11_gt_irq_handler(gt, master_ctl);

        if (master_ctl & GEN11_DISPLAY_IRQ)
                gen11_display_irq_handler(display);

        gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

        dg1_master_intr_enable(regs);

        gen11_gu_misc_irq_handler(display, gu_misc_iir);

        pmu_irq_stats(i915, IRQ_HANDLED);

        return IRQ_HANDLED;
}

static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_uncore *uncore = &dev_priv->uncore;

        gen2_irq_reset(uncore, DE_IRQ_REGS);
        dev_priv->irq_mask = ~0u;

        if (GRAPHICS_VER(dev_priv) == 7)
                intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

        if (IS_HASWELL(dev_priv)) {
                intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
                intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
        }

        gen5_gt_irq_reset(to_gt(dev_priv));

        ibx_display_irq_reset(display);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;

        intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
        intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

        gen5_gt_irq_reset(to_gt(dev_priv));

        vlv_display_irq_reset(display);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_uncore *uncore = &dev_priv->uncore;

        gen8_master_intr_disable(intel_uncore_regs(uncore));

        gen8_gt_irq_reset(to_gt(dev_priv));
        gen8_display_irq_reset(display);
        gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_gt *gt = to_gt(dev_priv);
        struct intel_uncore *uncore = gt->uncore;

        gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

        gen11_gt_irq_reset(gt);
        gen11_display_irq_reset(display);

        gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
        gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}

static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_uncore *uncore = &dev_priv->uncore;
        struct intel_gt *gt;
        unsigned int i;

        dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

        for_each_gt(gt, dev_priv, i)
                gen11_gt_irq_reset(gt);

        gen11_display_irq_reset(display);

        gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
        gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

        intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_uncore *uncore = &dev_priv->uncore;

        intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
        intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);

        gen8_gt_irq_reset(to_gt(dev_priv));

        gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

        vlv_display_irq_reset(display);
}

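/*
 * Postinstall hooks program each unit's IMR/IER; platforms with a
 * dedicated master interrupt control unmask it last, so nothing is
 * delivered before the individual sources are set up.
 */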
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;

        gen5_gt_irq_postinstall(to_gt(dev_priv));

        ilk_de_irq_postinstall(display);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;

        gen5_gt_irq_postinstall(to_gt(dev_priv));

        vlv_display_irq_postinstall(display);

        intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
        intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;

        gen8_gt_irq_postinstall(to_gt(dev_priv));
        gen8_de_irq_postinstall(display);

        gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_gt *gt = to_gt(dev_priv);
        struct intel_uncore *uncore = gt->uncore;
        u32 gu_misc_masked = GEN11_GU_MISC_GSE;

        gen11_gt_irq_postinstall(gt);
        gen11_de_irq_postinstall(display);

        gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

        gen11_master_intr_enable(intel_uncore_regs(uncore));
        intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 gu_misc_masked = GEN11_GU_MISC_GSE;
        struct intel_gt *gt;
        unsigned int i;

        for_each_gt(gt, dev_priv, i)
                gen11_gt_irq_postinstall(gt);

        gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

        dg1_de_irq_postinstall(display);

        dg1_master_intr_enable(intel_uncore_regs(uncore));
        intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;

        gen8_gt_irq_postinstall(to_gt(dev_priv));

        vlv_display_irq_postinstall(display);

        intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
        intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
        struct intel_display *display = i915->display;

        /*
         * On gen2/3 FBC generates (seemingly spurious)
         * display INVALID_GTT/INVALID_GTT_PTE table errors.
         *
         * Also gen3 bspec has this to say:
         * "DISPA_INVALID_GTT_PTE
         *  [DevNapa] : Reserved. This bit does not reflect the page
         *  table error for the display plane A."
         *
         * Unfortunately we can't mask off individual PGTBL_ER bits,
         * so we just have to mask off all page table errors via EMR.
         */
        if (HAS_FBC(display))
                return I915_ERROR_MEMORY_REFRESH;
        else
                return I915_ERROR_PAGE_TABLE |
                        I915_ERROR_MEMORY_REFRESH;
}

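/*
 * Ack EIR and report back any bits that refuse to clear; those are
 * stuck until the underlying error is handled (or the GPU is reset)
 * and get masked off via EMR.
 */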
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
                               u32 *eir, u32 *eir_stuck)
{
        u32 emr;

        *eir = intel_uncore_read(&dev_priv->uncore, EIR);
        intel_uncore_write(&dev_priv->uncore, EIR, *eir);

        *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
        if (*eir_stuck == 0)
                return;

        /*
         * Toggle all EMR bits to make sure we get an edge
         * in the ISR master error bit if we don't clear
         * all the EIR bits. Otherwise the edge triggered
         * IIR on i965/g4x wouldn't notice that an interrupt
         * is still pending. Also some EIR bits can't be
         * cleared except by handling the underlying error
         * (or by a GPU reset) so we mask any bit that
         * remains set.
         */
        emr = intel_uncore_read(&dev_priv->uncore, EMR);
        intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
        intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
                                   u32 eir, u32 eir_stuck)
{
        drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

        if (eir_stuck)
                drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
                        eir_stuck);

        drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
                intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_uncore *uncore = &dev_priv->uncore;

        i9xx_display_irq_reset(display);

        gen2_error_reset(uncore, GEN2_ERROR_REGS);
        gen2_irq_reset(uncore, GEN2_IRQ_REGS);
        dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 enable_mask;

        gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv));

        dev_priv->irq_mask =
                ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (DISPLAY_VER(display) >= 3) {
                dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
                enable_mask |= I915_ASLE_INTERRUPT;
        }

        if (HAS_HOTPLUG(display)) {
                dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
        }

        gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

        i915_display_irq_postinstall(display);
}

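/*
 * Single-pass ack-then-handle: HPD/PIPESTAT/EIR status is acked before
 * IIR is written back, since some IIR bits reflect those levels (cf. the
 * VLV_IIR ordering comment above), and the handlers run only afterwards.
 */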
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct intel_display *display = dev_priv->display;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                if (HAS_HOTPLUG(display) &&
                    iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(display);

                /* Call regardless, as some status bits might not be
                 * signalled in IIR */
                i9xx_pipestat_irq_ack(display, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(display, hotplug_status);

                i915_pipestat_irq_handler(display, iir, pipe_stats);
        } while (0);

        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_uncore *uncore = &dev_priv->uncore;

        i9xx_display_irq_reset(display);

        gen2_error_reset(uncore, GEN2_ERROR_REGS);
        gen2_irq_reset(uncore, GEN2_IRQ_REGS);
        dev_priv->irq_mask = ~0u;
}

static u32 i965_error_mask(struct drm_i915_private *i915)
{
        /*
         * Enable some error detection, note the instruction error mask
         * bit is reserved, so we leave it masked.
         *
         * i965 FBC no longer generates spurious GTT errors,
         * so we can always enable the page table errors.
         */
        if (IS_G4X(i915))
                return GM45_ERROR_PAGE_TABLE |
                        GM45_ERROR_MEM_PRIV |
                        GM45_ERROR_CP_PRIV |
                        I915_ERROR_MEMORY_REFRESH;
        else
                return I915_ERROR_PAGE_TABLE |
                        I915_ERROR_MEMORY_REFRESH;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 enable_mask;

        gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));

        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PORT_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (IS_G4X(dev_priv))
                enable_mask |= I915_BSD_USER_INTERRUPT;

        gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

        i965_display_irq_postinstall(display);
}

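/*
 * Like i915_irq_handler() above, plus the BSD ring: its user interrupt
 * sits at IIR bit 25, hence the iir >> 25 when passing the status on to
 * the VCS engine.
 */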
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct intel_display *display = dev_priv->display;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(display);

                /* Call regardless, as some status bits might not be
                 * signalled in IIR */
                i9xx_pipestat_irq_ack(display, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
                                            iir);

                if (iir & I915_BSD_USER_INTERRUPT)
                        intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
                                            iir >> 25);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(display, hotplug_status);

                i965_pipestat_irq_handler(display, iir, pipe_stats);
        } while (0);

        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
        int i;

        INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
        for (i = 0; i < MAX_L3_SLICES; ++i)
                dev_priv->l3_parity.remap_info[i] = NULL;

        /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
        if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
                to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < MAX_L3_SLICES; ++i)
                kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
        if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
                return dg1_irq_handler;
        else if (GRAPHICS_VER(dev_priv) >= 11)
                return gen11_irq_handler;
        else if (IS_CHERRYVIEW(dev_priv))
                return cherryview_irq_handler;
        else if (GRAPHICS_VER(dev_priv) >= 8)
                return gen8_irq_handler;
        else if (IS_VALLEYVIEW(dev_priv))
                return valleyview_irq_handler;
        else if (GRAPHICS_VER(dev_priv) >= 5)
                return ilk_irq_handler;
        else if (GRAPHICS_VER(dev_priv) == 4)
                return i965_irq_handler;
        else
                return i915_irq_handler;
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
        if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
                dg1_irq_reset(dev_priv);
        else if (GRAPHICS_VER(dev_priv) >= 11)
                gen11_irq_reset(dev_priv);
        else if (IS_CHERRYVIEW(dev_priv))
                cherryview_irq_reset(dev_priv);
        else if (GRAPHICS_VER(dev_priv) >= 8)
                gen8_irq_reset(dev_priv);
        else if (IS_VALLEYVIEW(dev_priv))
                valleyview_irq_reset(dev_priv);
        else if (GRAPHICS_VER(dev_priv) >= 5)
                ilk_irq_reset(dev_priv);
        else if (GRAPHICS_VER(dev_priv) == 4)
                i965_irq_reset(dev_priv);
        else
                i915_irq_reset(dev_priv);
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
        if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
                dg1_irq_postinstall(dev_priv);
        else if (GRAPHICS_VER(dev_priv) >= 11)
                gen11_irq_postinstall(dev_priv);
        else if (IS_CHERRYVIEW(dev_priv))
                cherryview_irq_postinstall(dev_priv);
        else if (GRAPHICS_VER(dev_priv) >= 8)
                gen8_irq_postinstall(dev_priv);
        else if (IS_VALLEYVIEW(dev_priv))
                valleyview_irq_postinstall(dev_priv);
        else if (GRAPHICS_VER(dev_priv) >= 5)
                ilk_irq_postinstall(dev_priv);
        else if (GRAPHICS_VER(dev_priv) == 4)
                i965_irq_postinstall(dev_priv);
        else
                i915_irq_postinstall(dev_priv);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
        int irq = to_pci_dev(dev_priv->drm.dev)->irq;
        int ret;

        /*
         * We enable some interrupt sources in our postinstall hooks, so mark
         * interrupts as enabled _before_ actually enabling them to avoid
         * special cases in our ordering checks.
         */
        dev_priv->irqs_enabled = true;

        intel_irq_reset(dev_priv);

        ret = request_irq(irq, intel_irq_handler(dev_priv),
                          IRQF_SHARED, DRIVER_NAME, dev_priv);
        if (ret < 0) {
                dev_priv->irqs_enabled = false;
                return ret;
        }

        intel_irq_postinstall(dev_priv);

        return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
        struct intel_display *display = dev_priv->display;
        int irq = to_pci_dev(dev_priv->drm.dev)->irq;

        if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
                return;

        intel_irq_reset(dev_priv);

        free_irq(irq, dev_priv);

        intel_hpd_cancel_work(display);
        dev_priv->irqs_enabled = false;
}

/**
 * intel_irq_suspend - Suspend interrupts
 * @i915: i915 device instance
 *
 * This function is used to disable interrupts at runtime.
 */
void intel_irq_suspend(struct drm_i915_private *i915)
{
        intel_irq_reset(i915);
        i915->irqs_enabled = false;
        intel_synchronize_irq(i915);
}

/**
 * intel_irq_resume - Resume interrupts
 * @i915: i915 device instance
 *
 * This function is used to enable interrupts at runtime.
 */
void intel_irq_resume(struct drm_i915_private *i915)
{
        i915->irqs_enabled = true;
        intel_irq_reset(i915);
        intel_irq_postinstall(i915);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
        return dev_priv->irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
        synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
        synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}