/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr_regs.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

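/*
 * As a rough usage sketch (error handling omitted; see intel_irq_init(),
 * intel_irq_install() and intel_irq_uninstall() below), interrupt setup
 * happens in two stages at driver load:
 *
 *	intel_irq_init(i915);           - work items and related state
 *	err = intel_irq_install(i915);  - irq reset + request_irq() + postinstall
 *	...
 *	intel_irq_uninstall(i915);      - irq reset + free_irq()
 */
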
/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

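/*
 * Disable and clear a complete IMR/IER/IIR interrupt register triplet.
 * Used by the per-platform irq_reset hooks further down in this file.
 */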
void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
	intel_uncore_write(uncore, regs.imr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.imr);

	intel_uncore_write(uncore, regs.ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore, regs.iir);

	intel_uncore_write(uncore, regs.ier, ier_val);
	intel_uncore_write(uncore, regs.imr, imr_val);
	intel_uncore_posting_read(uncore, regs.imr);
}

void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
{
	intel_uncore_write(uncore, regs.emr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.emr);

	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
}

void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
		     u32 emr_val)
{
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);

	intel_uncore_write(uncore, regs.emr, emr_val);
	intel_uncore_posting_read(uncore, regs.emr);
}
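
/*
 * The EIR/EMR error registers mirror the IIR/IMR scheme above:
 * gen2_error_reset() masks everything and double clears EIR, while
 * gen2_error_init() clears EIR and then programs the requested mask
 * into EMR.
 */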

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt occurred
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
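
/*
 * The uevent generated above carries I915_L3_PARITY_UEVENT=1 plus ROW=,
 * BANK=, SUBBANK= and SLICE= key/value pairs; userspace is expected to
 * feed the bad row back through the L3 remapping interface (exposed via
 * sysfs) so it gets remapped.
 */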

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = &dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = &dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
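
/*
 * Note that cherryview_irq_handler() differs from the Valleyview variant
 * above mainly in using GEN8_MASTER_IRQ instead of VLV_MASTER_IER as the
 * master control and in routing GT interrupts through the gen8 path.
 */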

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
		else
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
	}

	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
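
/*
 * The gen8, gen11 and dg1 top level handlers below all follow the same
 * disable master -> sample -> handle -> re-enable master pattern sketched
 * in the helpers above.
 */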

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}

static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	struct intel_gt *gt = to_gt(i915);
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 master_tile_ctl, master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_tile_ctl = dg1_master_intr_disable(regs);
	if (!master_tile_ctl) {
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* FIXME: we only support tile 0 for now. */
	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
	} else {
		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
			master_tile_ctl);
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	gen11_gt_irq_handler(gt, master_ctl);

	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	dg1_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	gen2_irq_reset(uncore, SDE_IRQ_REGS);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}

/* drm_dma.h hooks */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen2_irq_reset(uncore, DE_IRQ_REGS);
	dev_priv->irq_mask = ~0u;

	if (GRAPHICS_VER(dev_priv) == 7)
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(to_gt(dev_priv));

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen8_master_intr_disable(intel_uncore_regs(uncore));

	gen8_gt_irq_reset(to_gt(dev_priv));
	gen8_display_irq_reset(dev_priv);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(dev_priv);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}

static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_gt *gt;
	unsigned int i;

	dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_reset(gt);

	gen11_display_irq_reset(dev_priv);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(to_gt(dev_priv));

	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	spin_lock_irq(&dev_priv->irq_lock);
	vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(to_gt(dev_priv));

	ilk_de_irq_postinstall(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(dev_priv);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_postinstall(gt);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	dg1_de_irq_postinstall(dev_priv);

	dg1_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
	/*
	 * On gen2/3 FBC generates (seemingly spurious)
	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
	 *
	 * Also gen3 bspec has this to say:
	 * "DISPA_INVALID_GTT_PTE
	 *  [DevNapa] : Reserved. This bit does not reflect the page
	 *              table error for the display plane A."
	 *
	 * Unfortunately we can't mask off individual PGTBL_ER bits,
	 * so we just have to mask off all page table errors via EMR.
	 */
	if (HAS_FBC(i915))
		return I915_ERROR_MEMORY_REFRESH;
	else
		return I915_ERROR_PAGE_TABLE |
			I915_ERROR_MEMORY_REFRESH;
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_display_irq_reset(dev_priv);

	gen2_error_reset(uncore, GEN2_ERROR_REGS);
	gen2_irq_reset(uncore, GEN2_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv));

	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (DISPLAY_VER(dev_priv) >= 3) {
		dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
		enable_mask |= I915_ASLE_INTERRUPT;
	}

	if (I915_HAS_HOTPLUG(dev_priv)) {
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
	}

	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_display_irq_reset(dev_priv);

	gen2_error_reset(uncore, GEN2_ERROR_REGS);
	gen2_irq_reset(uncore, GEN2_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

static u32 i965_error_mask(struct drm_i915_private *i915)
{
	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 *
	 * i965 FBC no longer generates spurious GTT errors,
	 * so we can always enable the page table errors.
	 */
	if (IS_G4X(i915))
		return GM45_ERROR_PAGE_TABLE |
			GM45_ERROR_MEM_PRIV |
			GM45_ERROR_CP_PRIV |
			I915_ERROR_MEMORY_REFRESH;
	else
		return I915_ERROR_PAGE_TABLE |
			I915_ERROR_MEMORY_REFRESH;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));

	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

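/*
 * The same platform checks, in the same order, are repeated by
 * intel_irq_reset() and intel_irq_postinstall() below; keep the three
 * dispatchers in sync when adding a platform.
 */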
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 4)
			return i965_irq_handler;
		else
			return i915_irq_handler;
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			return dg1_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else
			i915_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else
			i915_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->irqs_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irqs_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
		return;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->irqs_enabled = false;
}

/**
 * intel_irq_suspend - Suspend interrupts
 * @i915: i915 device instance
 *
 * This function is used to disable interrupts at runtime.
 */
void intel_irq_suspend(struct drm_i915_private *i915)
{
	intel_irq_reset(i915);
	i915->irqs_enabled = false;
	intel_synchronize_irq(i915);
}

/**
 * intel_irq_resume - Resume interrupts
 * @i915: i915 device instance
 *
 * This function is used to enable interrupts at runtime.
 */
void intel_irq_resume(struct drm_i915_private *i915)
{
	i915->irqs_enabled = true;
	intel_irq_reset(i915);
	intel_irq_postinstall(i915);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}