/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_print.h>

#include "display/intel_display_core.h"
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr_regs.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */
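
/*
 * Illustrative sketch only (not part of the build): the expected ordering
 * of the entry points defined at the bottom of this file, as seen from the
 * driver's point of view; error handling elided:
 *
 *	intel_irq_init(i915);		// work items, no hw access
 *	ret = intel_irq_install(i915);	// reset hw, request_irq(), postinstall
 *	...
 *	intel_irq_suspend(i915);	// runtime suspend: reset + sync
 *	intel_irq_resume(i915);		// runtime resume: reset + postinstall
 *	...
 *	intel_irq_uninstall(i915);	// reset hw, free_irq(), cancel hotplug
 *	intel_irq_fini(i915);		// free per-slice l3 remap info
 */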

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU, so that interrupts from another
 * device sharing the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
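
/*
 * Illustrative sketch only: each top-level interrupt handler below funnels
 * its final result through pmu_irq_stats() just before returning, e.g.
 *
 *	irqreturn_t ret = IRQ_NONE;
 *	...
 *	ret = IRQ_HANDLED;
 *	...
 *	pmu_irq_stats(i915, ret);
 *	return ret;
 */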

void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
	intel_uncore_write(uncore, regs.imr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.imr);

	intel_uncore_write(uncore, regs.ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore, regs.iir);

	intel_uncore_write(uncore, regs.ier, ier_val);
	intel_uncore_write(uncore, regs.imr, imr_val);
	intel_uncore_posting_read(uncore, regs.imr);
}
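
/*
 * Illustrative sketch only: a platform's irq_reset and irq_postinstall
 * hooks are expected to pair the two helpers above around request_irq(),
 * e.g. (compare i915_irq_reset()/i915_irq_postinstall() further down;
 * MY_IRQ_REGS is a placeholder for a struct i915_irq_regs instance such
 * as GEN2_IRQ_REGS or DE_IRQ_REGS):
 *
 *	gen2_irq_reset(uncore, MY_IRQ_REGS);	// preinstall/uninstall
 *	...
 *	gen2_irq_init(uncore, MY_IRQ_REGS,	// postinstall; asserts that
 *		      imr_val, ier_val);	// IIR is still zero
 */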

void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
{
	intel_uncore_write(uncore, regs.emr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.emr);

	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
}

void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
		     u32 emr_val)
{
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);

	intel_uncore_write(uncore, regs.emr, emr_val);
	intel_uncore_posting_read(uncore, regs.emr);
}
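
/*
 * Illustrative sketch only: the error register helpers pair up the same
 * way, e.g. as done in i915_irq_reset()/i915_irq_postinstall() below:
 *
 *	gen2_error_reset(uncore, GEN2_ERROR_REGS);
 *	...
 *	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(i915));
 */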

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt occurred
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1 << slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(display, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(display, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_display *display = i915->display;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(display)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(display) >= 7)
			ivb_display_irq_handler(display, de_iir);
		else
			ilk_display_irq_handler(display, de_iir);
		ret = IRQ_HANDLED;
	}

	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);

		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate a new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(display, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate a new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_display *display = i915->display;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(display);

	gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(display, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}

static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	struct intel_display *display = i915->display;
	struct intel_gt *gt = to_gt(i915);
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 master_tile_ctl, master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_tile_ctl = dg1_master_intr_disable(regs);
	if (!master_tile_ctl) {
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* FIXME: we only support tile 0 for now. */
	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
	} else {
		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
			master_tile_ctl);
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	gen11_gt_irq_handler(gt, master_ctl);

	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(display);

	gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

	dg1_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(display, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen2_irq_reset(uncore, DE_IRQ_REGS);
	dev_priv->irq_mask = ~0u;

	if (GRAPHICS_VER(dev_priv) == 7)
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(to_gt(dev_priv));

	ibx_display_irq_reset(display);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	vlv_display_irq_reset(display);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen8_master_intr_disable(intel_uncore_regs(uncore));

	gen8_gt_irq_reset(to_gt(dev_priv));
	gen8_display_irq_reset(display);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(display);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}

static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_gt *gt;
	unsigned int i;

	dg1_master_intr_disable(intel_uncore_regs(uncore));

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_reset(gt);

	gen11_display_irq_reset(display);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
	intel_uncore_posting_read(uncore, GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(to_gt(dev_priv));

	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	vlv_display_irq_reset(display);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	ilk_de_irq_postinstall(display);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	vlv_display_irq_postinstall(display);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(display);

	gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(display);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_postinstall(gt);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	dg1_de_irq_postinstall(display);

	dg1_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen8_gt_irq_postinstall(to_gt(dev_priv));

	vlv_display_irq_postinstall(display);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
	struct intel_display *display = i915->display;

	/*
	 * On gen2/3 FBC generates (seemingly spurious)
	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
	 *
	 * Also gen3 bspec has this to say:
	 * "DISPA_INVALID_GTT_PTE
	 *  [DevNapa] : Reserved. This bit does not reflect the page
	 *              table error for the display plane A."
	 *
	 * Unfortunately we can't mask off individual PGTBL_ER bits,
	 * so we just have to mask off all page table errors via EMR.
	 */
	if (HAS_FBC(display))
		return I915_ERROR_MEMORY_REFRESH;
	else
		return I915_ERROR_PAGE_TABLE |
			I915_ERROR_MEMORY_REFRESH;
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
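
/*
 * Illustrative worked example only: suppose EIR reads back as 0x00000010
 * even after the write-to-clear above; that error condition is stuck.
 * Briefly writing EMR to 0xffffffff toggles every mask bit, forcing a
 * fresh 0->1 edge on the ISR master error bit once bits are unmasked
 * again, and restoring EMR as (emr | 0x00000010) keeps the stuck error
 * masked so it cannot re-raise the interrupt forever.
 */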

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_display_irq_reset(display);

	gen2_error_reset(uncore, GEN2_ERROR_REGS);
	gen2_irq_reset(uncore, GEN2_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv));

	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (DISPLAY_VER(display) >= 3) {
		dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
		enable_mask |= I915_ASLE_INTERRUPT;
	}

	if (HAS_HOTPLUG(display)) {
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
	}

	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

	i915_display_irq_postinstall(display);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (HAS_HOTPLUG(display) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		i915_pipestat_irq_handler(display, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_display_irq_reset(display);

	gen2_error_reset(uncore, GEN2_ERROR_REGS);
	gen2_irq_reset(uncore, GEN2_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

static u32 i965_error_mask(struct drm_i915_private *i915)
{
	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 *
	 * i965 FBC no longer generates spurious GTT errors,
	 * so we can always enable the page table errors.
	 */
	if (IS_G4X(i915))
		return GM45_ERROR_PAGE_TABLE |
			GM45_ERROR_MEM_PRIV |
			GM45_ERROR_CP_PRIV |
			I915_ERROR_MEMORY_REFRESH;
	else
		return I915_ERROR_PAGE_TABLE |
			I915_ERROR_MEMORY_REFRESH;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));

	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

	i965_display_irq_postinstall(display);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		i965_pipestat_irq_handler(display, iir, pipe_stats);
	} while (0);

	/* Count only interrupts we actually handled (the line is shared) */
	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
		return dg1_irq_handler;
	else if (GRAPHICS_VER(dev_priv) >= 11)
		return gen11_irq_handler;
	else if (IS_CHERRYVIEW(dev_priv))
		return cherryview_irq_handler;
	else if (GRAPHICS_VER(dev_priv) >= 8)
		return gen8_irq_handler;
	else if (IS_VALLEYVIEW(dev_priv))
		return valleyview_irq_handler;
	else if (GRAPHICS_VER(dev_priv) >= 5)
		return ilk_irq_handler;
	else if (GRAPHICS_VER(dev_priv) == 4)
		return i965_irq_handler;
	else
		return i915_irq_handler;
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
		dg1_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 11)
		gen11_irq_reset(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 8)
		gen8_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 5)
		ilk_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_reset(dev_priv);
	else
		i915_irq_reset(dev_priv);
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
		dg1_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 11)
		gen11_irq_postinstall(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 8)
		gen8_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 5)
		ilk_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_postinstall(dev_priv);
	else
		i915_irq_postinstall(dev_priv);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->irqs_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irqs_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}
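
/*
 * Illustrative sketch only (the actual call site is in the driver load
 * path; "err_irq" is a hypothetical label): callers check the return
 * value and treat failure as fatal for interrupt support:
 *
 *	ret = intel_irq_install(i915);
 *	if (ret)
 *		goto err_irq;	// irqs_enabled has already been rolled back
 */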

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
		return;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(display);
	dev_priv->irqs_enabled = false;
}

/**
 * intel_irq_suspend - Suspend interrupts
 * @i915: i915 device instance
 *
 * This function is used to disable interrupts at runtime.
 */
void intel_irq_suspend(struct drm_i915_private *i915)
{
	intel_irq_reset(i915);
	i915->irqs_enabled = false;
	intel_synchronize_irq(i915);
}

/**
 * intel_irq_resume - Resume interrupts
 * @i915: i915 device instance
 *
 * This function is used to enable interrupts at runtime.
 */
void intel_irq_resume(struct drm_i915_private *i915)
{
	i915->irqs_enabled = true;
	intel_irq_reset(i915);
	intel_irq_postinstall(i915);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}