xref: /linux/drivers/gpu/drm/i915/i915_irq.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/slab.h>
32 #include <linux/sysrq.h>
33 
34 #include <drm/drm_drv.h>
35 #include <drm/drm_print.h>
36 
37 #include "display/intel_display_irq.h"
38 #include "display/intel_hotplug.h"
39 #include "display/intel_hotplug_irq.h"
40 #include "display/intel_lpe_audio.h"
41 
42 #include "gt/intel_breadcrumbs.h"
43 #include "gt/intel_gt.h"
44 #include "gt/intel_gt_irq.h"
45 #include "gt/intel_gt_pm_irq.h"
46 #include "gt/intel_gt_regs.h"
47 #include "gt/intel_rps.h"
48 
49 #include "i915_driver.h"
50 #include "i915_drv.h"
51 #include "i915_irq.h"
52 #include "i915_reg.h"
53 
54 /**
55  * DOC: interrupt handling
56  *
57  * These functions provide the basic support for enabling and disabling the
58  * interrupt handling support. There's a lot more functionality in i915_irq.c
59  * and related files, but that will be described in separate chapters.
60  */
61 
62 /*
63  * Interrupt statistic for PMU. Increments the counter only if the
64  * interrupt originated from the GPU so interrupts from a device which
65  * shares the interrupt line are not accounted.
66  */
67 static inline void pmu_irq_stats(struct drm_i915_private *i915,
68 				 irqreturn_t res)
69 {
70 	if (unlikely(res != IRQ_HANDLED))
71 		return;
72 
73 	/*
74 	 * A clever compiler translates that into INC. A not so clever one
75 	 * should at least prevent store tearing.
76 	 */
77 	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
78 }
79 
/* Reset a gen2+ style IMR/IER/IIR interrupt register triplet. */
void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
	/* Mask everything first, and flush the write before touching IER. */
	intel_uncore_write(uncore, regs.imr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.imr);

	/* Disable all interrupt enables. */
	intel_uncore_write(uncore, regs.ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
}
93 
94 /*
95  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
96  */
97 void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
98 {
99 	u32 val = intel_uncore_read(uncore, reg);
100 
101 	if (val == 0)
102 		return;
103 
104 	drm_WARN(&uncore->i915->drm, 1,
105 		 "Interrupt register 0x%x is not zero: 0x%08x\n",
106 		 i915_mmio_reg_offset(reg), val);
107 	intel_uncore_write(uncore, reg, 0xffffffff);
108 	intel_uncore_posting_read(uncore, reg);
109 	intel_uncore_write(uncore, reg, 0xffffffff);
110 	intel_uncore_posting_read(uncore, reg);
111 }
112 
/*
 * Program a gen2+ style interrupt register triplet. IIR must already be
 * clear (it is reset at preinstall/uninstall); then IER and IMR are set.
 */
void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore, regs.iir);

	intel_uncore_write(uncore, regs.ier, ier_val);
	intel_uncore_write(uncore, regs.imr, imr_val);
	/* Flush the IMR write so the unmask takes effect immediately. */
	intel_uncore_posting_read(uncore, regs.imr);
}
122 
/* Reset an EMR/EIR error register pair: mask all errors, then clear EIR. */
void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
{
	intel_uncore_write(uncore, regs.emr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.emr);

	/* Clear EIR twice, mirroring the paranoid double IIR clear in gen2_irq_reset(). */
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
}
133 
/* Clear any stale EIR bits, then program the error mask (EMR). */
void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
		     u32 emr_val)
{
	/* Double clear of EIR, as in gen2_error_reset(). */
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);

	intel_uncore_write(uncore, regs.emr, emr_val);
	intel_uncore_posting_read(uncore, regs.emr);
}
145 
146 /**
147  * ivb_parity_work - Workqueue called when a parity error interrupt
148  * occurred.
149  * @work: workqueue struct
150  *
151  * Doesn't actually do anything except notify userspace. As a consequence of
152  * this event, userspace should try to remap the bad rows since statistically
153  * it is likely the same row is more likely to go bad again.
154  */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;


	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	/* Disable DOP clock gating while poking at the L3 error registers. */
	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	/* Service every slice with a recorded pending parity error. */
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		/* ffs() is 1-based; convert to a 0-based slice index. */
		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		/* Decode the failing row/bank/subbank from the error status. */
		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Ack the error and re-arm parity error detection. */
		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		/* Notify userspace so it can remap the bad row (see kerneldoc above). */
		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	/* Restore the original DOP clock gating setting. */
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	/* Re-enable the parity interrupt now that all slices are serviced. */
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

}
223 
/* Top-level interrupt handler for Valleyview (VLV). */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		/* Nothing pending from GT, PM or display. */
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		/* Ack GT/PM interrupts before processing them below. */
		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		/* Restore IER and the master enable so new events can fire. */
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(display, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
317 
/* Top-level interrupt handler for Cherryview (CHV). */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		/* Nothing pending. */
		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		/* Restore IER and the master enable so new events can fire. */
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(display, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
403 
404 /*
405  * To handle irqs with the minimum potential races with fresh interrupts, we:
406  * 1 - Disable Master Interrupt Control.
407  * 2 - Find the source(s) of the interrupt.
408  * 3 - Clear the Interrupt Identity bits (IIR).
409  * 4 - Process the interrupt(s) that had bits set in the IIRs.
410  * 5 - Re-enable Master Interrupt Control.
411  */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_display *display = i915->display;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 gt_iir, de_ier = 0, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* Disable master and south interrupts */
	ilk_display_irq_master_disable(display, &de_ier, &sde_ier);

	/* Find, clear, then process each source of interrupt */

	/* GT interrupts: ack GTIIR, then dispatch per graphics version. */
	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	/* Display interrupts. */
	if (ilk_display_irq_handler(display))
		ret = IRQ_HANDLED;

	/* PM interrupts exist only on gen6+. */
	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	/* Re-enable master and south interrupts */
	ilk_display_irq_master_enable(display, de_ier, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}
463 
/* Disable the gen8 master interrupt and return the sampled master control. */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
476 
/* Re-enable the gen8 top-level master interrupt. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
481 
/* Top-level interrupt handler for gen8 (Broadwell-class) devices. */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		/* Spurious/shared-line interrupt: re-enable and bail. */
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(display, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}
514 
/* Disable the gen11 master interrupt and return the sampled master control. */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
527 
/* Re-enable the gen11 top-level master interrupt. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
532 
/* Top-level interrupt handler for gen11 (Icelake-class) devices. */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_display *display = i915->display;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		/* Spurious/shared-line interrupt: re-enable and bail. */
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(display);

	gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

	gen11_master_intr_enable(regs);

	/* GU misc is processed after the master is back on; it was acked above. */
	gen11_gu_misc_irq_handler(display, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}
568 
/*
 * Disable the DG1 tile-level master interrupt, returning (and acking) the
 * pending tile indications. Returns 0 when nothing was pending.
 */
static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}
585 
/* Re-enable the DG1 tile-level master interrupt. */
static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}
590 
/* Top-level interrupt handler for DG1 (multi-tile capable) devices. */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	struct intel_display *display = i915->display;
	struct intel_gt *gt = to_gt(i915);
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 master_tile_ctl, master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_tile_ctl = dg1_master_intr_disable(regs);
	if (!master_tile_ctl) {
		/* Spurious/shared-line interrupt: re-enable and bail. */
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* FIXME: we only support tile 0 for now. */
	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		/* Sample and ack the per-tile GFX master control. */
		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
	} else {
		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
			master_tile_ctl);
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	gen11_gt_irq_handler(gt, master_ctl);

	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(display);

	gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

	dg1_master_intr_enable(regs);

	/* GU misc is processed after the master is back on; it was acked above. */
	gen11_gu_misc_irq_handler(display, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}
635 
/* Reset all Ironlake-class interrupt state (display first, then GT). */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	/* The master interrupt enable is in DEIER, reset display irq first */
	ilk_display_irq_reset(display);
	gen5_gt_irq_reset(to_gt(dev_priv));
}
644 
/* Reset all VLV interrupt state: master off first, then GT and display. */
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	vlv_display_irq_reset(display);
}
656 
/* Reset all gen8 interrupt state: master off first, then GT, display, PCU. */
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen8_master_intr_disable(intel_uncore_regs(uncore));

	gen8_gt_irq_reset(to_gt(dev_priv));
	gen8_display_irq_reset(display);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}
668 
/* Reset all gen11 interrupt state: master off first, then GT, display, misc. */
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_gt *gt = to_gt(dev_priv);
	/*
	 * NOTE(review): gt->uncore is used below while the master disable goes
	 * through &dev_priv->uncore — presumably the same uncore for the
	 * primary GT; confirm before unifying.
	 */
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(display);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}
683 
/* Reset all DG1 interrupt state across every GT tile. */
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_gt *gt;
	unsigned int i;

	dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_reset(gt);

	gen11_display_irq_reset(display);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	/* Ack/clear any leftover bits in the GFX master IRQ register. */
	intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0);
}
703 
704 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
705 {
706 	struct intel_display *display = dev_priv->display;
707 	struct intel_uncore *uncore = &dev_priv->uncore;
708 
709 	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
710 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
711 
712 	gen8_gt_irq_reset(to_gt(dev_priv));
713 
714 	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
715 
716 	vlv_display_irq_reset(display);
717 }
718 
/* Enable Ironlake-class interrupts: GT first, then display engine. */
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	ilk_de_irq_postinstall(display);
}
727 
/* Enable VLV interrupts: GT and display first, master enable last. */
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	vlv_display_irq_postinstall(display);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}
739 
/* Enable gen8 interrupts: GT and display first, master enable last. */
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(display);

	gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}
749 
/* Enable gen11 interrupts: GT, display and GU misc first, master last. */
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(display);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(intel_uncore_regs(uncore));
	/* Flush the master enable before interrupts are expected. */
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}
765 
/* Enable DG1 interrupts on every GT tile; tile master enable comes last. */
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_postinstall(gt);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	dg1_de_irq_postinstall(display);

	dg1_master_intr_enable(intel_uncore_regs(uncore));
	/* Flush the master enable before interrupts are expected. */
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}
784 
/* Enable CHV interrupts: GT and display first, master enable last. */
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen8_gt_irq_postinstall(to_gt(dev_priv));

	vlv_display_irq_postinstall(display);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
796 
#define I9XX_HAS_FBC(i915) (IS_I85X(i915) || IS_I865G(i915) || IS_I915GM(i915) || IS_I945GM(i915))

/* Return the EMR mask (bits to ignore) for gen2/3 error reporting. */
static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
	/*
	 * On gen2/3 FBC generates (seemingly spurious)
	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
	 *
	 * Also gen3 bspec has this to say:
	 * "DISPA_INVALID_GTT_PTE
	 *  [DevNapa] : Reserved. This bit does not reflect the page
	 *  table error for the display plane A."
	 *
	 * Unfortunately we can't mask off individual PGTBL_ER bits,
	 * so we just have to mask off all page table errors via EMR.
	 */
	if (I9XX_HAS_FBC(i915))
		return I915_ERROR_MEMORY_REFRESH;
	else
		return I915_ERROR_PAGE_TABLE |
			I915_ERROR_MEMORY_REFRESH;
}
819 
/*
 * Ack gen2/3 error interrupts: returns the original EIR in @eir and any
 * bits that refused to clear in @eir_stuck (those get masked via EMR).
 */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	/* Read and write back EIR to clear the reported errors. */
	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
846 
/* Log gen2/3 master error details (EIR, stuck bits and PGTBL_ER). */
static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}
859 
/* Reset gen3 (i915-class) interrupt and error register state. */
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_display_irq_reset(display);

	gen2_error_reset(uncore, GEN2_ERROR_REGS);
	gen2_irq_reset(uncore, GEN2_IRQ_REGS);
	/* Everything masked until postinstall recomputes the IMR mask. */
	dev_priv->gen2_imr_mask = ~0u;
}
871 
/* Enable gen3 (i915-class) interrupts: program EMR, then IMR/IER. */
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	/* Unmask only the errors we want reported (see i9xx_error_mask()). */
	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv));

	enable_mask = i9xx_display_irq_enable_mask(display) |
		I915_MASTER_ERROR_INTERRUPT;

	/*
	 * The IMR mask is computed before I915_USER_INTERRUPT is added,
	 * so the user interrupt stays masked in IMR but enabled in IER.
	 */
	dev_priv->gen2_imr_mask = ~enable_mask;

	enable_mask |= I915_USER_INTERRUPT;

	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask);

	i915_display_irq_postinstall(display);
}
891 
/* Top-level interrupt handler for gen3 (i915-class) devices. */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Ack the IIR bits now; the sources are processed below. */
		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		i915_pipestat_irq_handler(display, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
946 
/* Reset gen4 (i965-class) interrupt and error register state. */
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_display_irq_reset(display);

	gen2_error_reset(uncore, GEN2_ERROR_REGS);
	gen2_irq_reset(uncore, GEN2_IRQ_REGS);
	/* Everything masked until postinstall recomputes the IMR mask. */
	dev_priv->gen2_imr_mask = ~0u;
}
958 
959 static u32 i965_error_mask(struct drm_i915_private *i915)
960 {
961 	/*
962 	 * Enable some error detection, note the instruction error mask
963 	 * bit is reserved, so we leave it masked.
964 	 *
965 	 * i965 FBC no longer generates spurious GTT errors,
966 	 * so we can always enable the page table errors.
967 	 */
968 	if (IS_G4X(i915))
969 		return GM45_ERROR_PAGE_TABLE |
970 			GM45_ERROR_MEM_PRIV |
971 			GM45_ERROR_CP_PRIV |
972 			I915_ERROR_MEMORY_REFRESH;
973 	else
974 		return I915_ERROR_PAGE_TABLE |
975 			I915_ERROR_MEMORY_REFRESH;
976 }
977 
978 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
979 {
980 	struct intel_display *display = dev_priv->display;
981 	struct intel_uncore *uncore = &dev_priv->uncore;
982 	u32 enable_mask;
983 
984 	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));
985 
986 	enable_mask = i9xx_display_irq_enable_mask(display) |
987 		I915_MASTER_ERROR_INTERRUPT;
988 
989 	dev_priv->gen2_imr_mask = ~enable_mask;
990 
991 	enable_mask |= I915_USER_INTERRUPT;
992 
993 	if (IS_G4X(dev_priv))
994 		enable_mask |= I915_BSD_USER_INTERRUPT;
995 
996 	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask);
997 
998 	i965_display_irq_postinstall(display);
999 }
1000 
1001 static irqreturn_t i965_irq_handler(int irq, void *arg)
1002 {
1003 	struct drm_i915_private *dev_priv = arg;
1004 	struct intel_display *display = dev_priv->display;
1005 	irqreturn_t ret = IRQ_NONE;
1006 
1007 	if (!intel_irqs_enabled(dev_priv))
1008 		return IRQ_NONE;
1009 
1010 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1011 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1012 
1013 	do {
1014 		u32 pipe_stats[I915_MAX_PIPES] = {};
1015 		u32 eir = 0, eir_stuck = 0;
1016 		u32 hotplug_status = 0;
1017 		u32 iir;
1018 
1019 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
1020 		if (iir == 0)
1021 			break;
1022 
1023 		ret = IRQ_HANDLED;
1024 
1025 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1026 			hotplug_status = i9xx_hpd_irq_ack(display);
1027 
1028 		/* Call regardless, as some status bits might not be
1029 		 * signalled in IIR */
1030 		i9xx_pipestat_irq_ack(display, iir, pipe_stats);
1031 
1032 		if (iir & I915_MASTER_ERROR_INTERRUPT)
1033 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
1034 
1035 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
1036 
1037 		if (iir & I915_USER_INTERRUPT)
1038 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
1039 					    iir);
1040 
1041 		if (iir & I915_BSD_USER_INTERRUPT)
1042 			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
1043 					    iir >> 25);
1044 
1045 		if (iir & I915_MASTER_ERROR_INTERRUPT)
1046 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
1047 
1048 		if (hotplug_status)
1049 			i9xx_hpd_irq_handler(display, hotplug_status);
1050 
1051 		i965_pipestat_irq_handler(display, iir, pipe_stats);
1052 	} while (0);
1053 
1054 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
1055 
1056 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1057 
1058 	return ret;
1059 }
1060 
1061 /**
1062  * intel_irq_init - initializes irq support
1063  * @dev_priv: i915 device instance
1064  *
1065  * This function initializes all the irq support including work items, timers
1066  * and all the vtables. It does not setup the interrupt itself though.
1067  */
1068 void intel_irq_init(struct drm_i915_private *dev_priv)
1069 {
1070 	int i;
1071 
1072 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
1073 	for (i = 0; i < MAX_L3_SLICES; ++i)
1074 		dev_priv->l3_parity.remap_info[i] = NULL;
1075 
1076 	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
1077 	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
1078 		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
1079 }
1080 
1081 /**
1082  * intel_irq_fini - deinitializes IRQ support
1083  * @i915: i915 device instance
1084  *
1085  * This function deinitializes all the IRQ support.
1086  */
1087 void intel_irq_fini(struct drm_i915_private *i915)
1088 {
1089 	int i;
1090 
1091 	for (i = 0; i < MAX_L3_SLICES; ++i)
1092 		kfree(i915->l3_parity.remap_info[i]);
1093 }
1094 
/*
 * Select the top level interrupt handler for the platform.
 *
 * The chain is ordered newest-first, and the Cherryview/Valleyview checks
 * must come before the generic GRAPHICS_VER tests: CHV would otherwise
 * match the >= 8 branch and VLV the >= 5 branch.
 */
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
		return dg1_irq_handler;
	else if (GRAPHICS_VER(dev_priv) >= 11)
		return gen11_irq_handler;
	else if (IS_CHERRYVIEW(dev_priv))
		return cherryview_irq_handler;
	else if (GRAPHICS_VER(dev_priv) >= 8)
		return gen8_irq_handler;
	else if (IS_VALLEYVIEW(dev_priv))
		return valleyview_irq_handler;
	else if (GRAPHICS_VER(dev_priv) >= 5)
		return ilk_irq_handler;
	else if (GRAPHICS_VER(dev_priv) == 4)
		return i965_irq_handler;
	else
		return i915_irq_handler;
}
1114 
/*
 * Disable and ack all interrupts via the platform-specific reset hook.
 *
 * Branch ordering mirrors intel_irq_handler(): CHV/VLV are tested before
 * the generic GRAPHICS_VER branches they would otherwise fall into.
 */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
		dg1_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 11)
		gen11_irq_reset(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 8)
		gen8_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 5)
		ilk_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_reset(dev_priv);
	else
		i915_irq_reset(dev_priv);
}
1134 
/*
 * Enable the interrupt sources for normal operation via the
 * platform-specific postinstall hook.
 *
 * Branch ordering mirrors intel_irq_handler(): CHV/VLV are tested before
 * the generic GRAPHICS_VER branches they would otherwise fall into.
 */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
		dg1_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 11)
		gen11_irq_postinstall(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 8)
		gen8_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 5)
		ilk_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_postinstall(dev_priv);
	else
		i915_irq_postinstall(dev_priv);
}
1154 
1155 /**
1156  * intel_irq_install - enables the hardware interrupt
1157  * @dev_priv: i915 device instance
1158  *
1159  * This function enables the hardware interrupt handling, but leaves the hotplug
1160  * handling still disabled. It is called after intel_irq_init().
1161  *
1162  * In the driver load and resume code we need working interrupts in a few places
1163  * but don't want to deal with the hassle of concurrent probe and hotplug
1164  * workers. Hence the split into this two-stage approach.
1165  */
1166 int intel_irq_install(struct drm_i915_private *dev_priv)
1167 {
1168 	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
1169 	int ret;
1170 
1171 	/*
1172 	 * We enable some interrupt sources in our postinstall hooks, so mark
1173 	 * interrupts as enabled _before_ actually enabling them to avoid
1174 	 * special cases in our ordering checks.
1175 	 */
1176 	dev_priv->irqs_enabled = true;
1177 
1178 	intel_irq_reset(dev_priv);
1179 
1180 	ret = request_irq(irq, intel_irq_handler(dev_priv),
1181 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
1182 	if (ret < 0) {
1183 		dev_priv->irqs_enabled = false;
1184 		return ret;
1185 	}
1186 
1187 	intel_irq_postinstall(dev_priv);
1188 
1189 	return ret;
1190 }
1191 
1192 /**
1193  * intel_irq_uninstall - finalizes all irq handling
1194  * @dev_priv: i915 device instance
1195  *
1196  * This stops interrupt and hotplug handling and unregisters and frees all
1197  * resources acquired in the init functions.
1198  */
1199 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
1200 {
1201 	struct intel_display *display = dev_priv->display;
1202 	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
1203 
1204 	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
1205 		return;
1206 
1207 	intel_irq_reset(dev_priv);
1208 
1209 	free_irq(irq, dev_priv);
1210 
1211 	intel_hpd_cancel_work(display);
1212 	dev_priv->irqs_enabled = false;
1213 }
1214 
1215 /**
1216  * intel_irq_suspend - Suspend interrupts
1217  * @i915: i915 device instance
1218  *
1219  * This function is used to disable interrupts at runtime.
1220  */
1221 void intel_irq_suspend(struct drm_i915_private *i915)
1222 {
1223 	intel_irq_reset(i915);
1224 	i915->irqs_enabled = false;
1225 	intel_synchronize_irq(i915);
1226 }
1227 
1228 /**
1229  * intel_irq_resume - Resume interrupts
1230  * @i915: i915 device instance
1231  *
1232  * This function is used to enable interrupts at runtime.
1233  */
1234 void intel_irq_resume(struct drm_i915_private *i915)
1235 {
1236 	i915->irqs_enabled = true;
1237 	intel_irq_reset(i915);
1238 	intel_irq_postinstall(i915);
1239 }
1240 
/* Report whether driver interrupt handling is currently enabled. */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->irqs_enabled;
}
1245 
1246 void intel_synchronize_irq(struct drm_i915_private *i915)
1247 {
1248 	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
1249 }
1250 
1251 void intel_synchronize_hardirq(struct drm_i915_private *i915)
1252 {
1253 	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
1254 }
1255