xref: /linux/drivers/gpu/drm/i915/display/intel_display_irq.c (revision db0d2d7572153490449e360d5ebf298badf5f395)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include <drm/drm_vblank.h>
7 
8 #include "gt/intel_rps.h"
9 #include "i915_drv.h"
10 #include "i915_irq.h"
11 #include "i915_reg.h"
12 #include "icl_dsi_regs.h"
13 #include "intel_atomic_plane.h"
14 #include "intel_crtc.h"
15 #include "intel_de.h"
16 #include "intel_display_irq.h"
17 #include "intel_display_trace.h"
18 #include "intel_display_types.h"
19 #include "intel_dmc_wl.h"
20 #include "intel_dp_aux.h"
21 #include "intel_dsb.h"
22 #include "intel_fdi_regs.h"
23 #include "intel_fifo_underrun.h"
24 #include "intel_gmbus.h"
25 #include "intel_hotplug_irq.h"
26 #include "intel_pipe_crc_regs.h"
27 #include "intel_pmdemand.h"
28 #include "intel_psr.h"
29 #include "intel_psr_regs.h"
30 #include "intel_uncore.h"
31 
/*
 * Program an IMR/IER/IIR register triplet while holding a DMC wakelock
 * reference on each register. References are taken before and released
 * (in reverse order) after the actual programming.
 */
static void
intel_display_irq_regs_init(struct intel_display *display, struct i915_irq_regs regs,
			    u32 imr_val, u32 ier_val)
{
	intel_dmc_wl_get(display, regs.imr);
	intel_dmc_wl_get(display, regs.ier);
	intel_dmc_wl_get(display, regs.iir);

	gen2_irq_init(to_intel_uncore(display->drm), regs, imr_val, ier_val);

	intel_dmc_wl_put(display, regs.iir);
	intel_dmc_wl_put(display, regs.ier);
	intel_dmc_wl_put(display, regs.imr);
}
46 
/*
 * Reset an IMR/IER/IIR register triplet while holding a DMC wakelock
 * reference on each register, mirroring intel_display_irq_regs_init().
 */
static void
intel_display_irq_regs_reset(struct intel_display *display, struct i915_irq_regs regs)
{
	intel_dmc_wl_get(display, regs.imr);
	intel_dmc_wl_get(display, regs.ier);
	intel_dmc_wl_get(display, regs.iir);

	gen2_irq_reset(to_intel_uncore(display->drm), regs);

	intel_dmc_wl_put(display, regs.iir);
	intel_dmc_wl_put(display, regs.ier);
	intel_dmc_wl_put(display, regs.imr);
}
60 
/*
 * Assert that the given IIR register reads back as zero, holding a DMC
 * wakelock reference around the access. (Note: the function name says
 * "irr" but the register being checked is an IIR.)
 */
static void
intel_display_irq_regs_assert_irr_is_zero(struct intel_display *display, i915_reg_t reg)
{
	intel_dmc_wl_get(display, reg);

	gen2_assert_iir_is_zero(to_intel_uncore(display->drm), reg);

	intel_dmc_wl_put(display, reg);
}
70 
/*
 * One entry in a table mapping a pipe fault status bit to the handler
 * (and plane) that should report it.
 */
struct pipe_fault_handler {
	/* callback; returns true if the fault was reported */
	bool (*handle)(struct intel_crtc *crtc, enum plane_id plane_id);
	/* fault status bit this entry is responsible for */
	u32 fault;
	/* plane the fault bit belongs to */
	enum plane_id plane_id;
};
76 
77 static bool handle_plane_fault(struct intel_crtc *crtc, enum plane_id plane_id)
78 {
79 	struct intel_display *display = to_intel_display(crtc);
80 	struct intel_plane_error error = {};
81 	struct intel_plane *plane;
82 
83 	plane = intel_crtc_get_plane(crtc, plane_id);
84 	if (!plane || !plane->capture_error)
85 		return false;
86 
87 	plane->capture_error(crtc, plane, &error);
88 
89 	drm_err_ratelimited(display->drm,
90 			    "[CRTC:%d:%s][PLANE:%d:%s] fault (CTL=0x%x, SURF=0x%x, SURFLIVE=0x%x)\n",
91 			    crtc->base.base.id, crtc->base.name,
92 			    plane->base.base.id, plane->base.name,
93 			    error.ctl, error.surf, error.surflive);
94 
95 	return true;
96 }
97 
98 static void intel_pipe_fault_irq_handler(struct intel_display *display,
99 					 const struct pipe_fault_handler *handlers,
100 					 enum pipe pipe, u32 fault_errors)
101 {
102 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
103 	const struct pipe_fault_handler *handler;
104 
105 	for (handler = handlers; handler && handler->fault; handler++) {
106 		if ((fault_errors & handler->fault) == 0)
107 			continue;
108 
109 		if (handler->handle(crtc, handler->plane_id))
110 			fault_errors &= ~handler->fault;
111 	}
112 
113 	WARN_ONCE(fault_errors, "[CRTC:%d:%s] unreported faults 0x%x\n",
114 		  crtc->base.base.id, crtc->base.name, fault_errors);
115 }
116 
117 static void
118 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
119 {
120 	struct intel_display *display = &dev_priv->display;
121 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
122 
123 	drm_crtc_handle_vblank(&crtc->base);
124 }
125 
126 /**
127  * ilk_update_display_irq - update DEIMR
128  * @dev_priv: driver private
129  * @interrupt_mask: mask of interrupt bits to update
130  * @enabled_irq_mask: mask of interrupt bits to enable
131  */
132 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
133 			    u32 interrupt_mask, u32 enabled_irq_mask)
134 {
135 	struct intel_display *display = &dev_priv->display;
136 	u32 new_val;
137 
138 	lockdep_assert_held(&dev_priv->irq_lock);
139 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
140 
141 	new_val = dev_priv->irq_mask;
142 	new_val &= ~interrupt_mask;
143 	new_val |= (~enabled_irq_mask & interrupt_mask);
144 
145 	if (new_val != dev_priv->irq_mask &&
146 	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
147 		dev_priv->irq_mask = new_val;
148 		intel_de_write(display, DEIMR, dev_priv->irq_mask);
149 		intel_de_posting_read(display, DEIMR);
150 	}
151 }
152 
/* Unmask (enable) the given bits in DEIMR. */
void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}
157 
/* Mask (disable) the given bits in DEIMR. */
void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}
162 
163 /**
164  * bdw_update_port_irq - update DE port interrupt
165  * @dev_priv: driver private
166  * @interrupt_mask: mask of interrupt bits to update
167  * @enabled_irq_mask: mask of interrupt bits to enable
168  */
169 void bdw_update_port_irq(struct drm_i915_private *dev_priv,
170 			 u32 interrupt_mask, u32 enabled_irq_mask)
171 {
172 	struct intel_display *display = &dev_priv->display;
173 	u32 new_val;
174 	u32 old_val;
175 
176 	lockdep_assert_held(&dev_priv->irq_lock);
177 
178 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
179 
180 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
181 		return;
182 
183 	old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
184 
185 	new_val = old_val;
186 	new_val &= ~interrupt_mask;
187 	new_val |= (~enabled_irq_mask & interrupt_mask);
188 
189 	if (new_val != old_val) {
190 		intel_de_write(display, GEN8_DE_PORT_IMR, new_val);
191 		intel_de_posting_read(display, GEN8_DE_PORT_IMR);
192 	}
193 }
194 
195 /**
196  * bdw_update_pipe_irq - update DE pipe interrupt
197  * @dev_priv: driver private
198  * @pipe: pipe whose interrupt to update
199  * @interrupt_mask: mask of interrupt bits to update
200  * @enabled_irq_mask: mask of interrupt bits to enable
201  */
202 static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
203 				enum pipe pipe, u32 interrupt_mask,
204 				u32 enabled_irq_mask)
205 {
206 	struct intel_display *display = &dev_priv->display;
207 	u32 new_val;
208 
209 	lockdep_assert_held(&dev_priv->irq_lock);
210 
211 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
212 
213 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
214 		return;
215 
216 	new_val = dev_priv->display.irq.de_irq_mask[pipe];
217 	new_val &= ~interrupt_mask;
218 	new_val |= (~enabled_irq_mask & interrupt_mask);
219 
220 	if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
221 		dev_priv->display.irq.de_irq_mask[pipe] = new_val;
222 		intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
223 		intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
224 	}
225 }
226 
/* Unmask (enable) the given bits in GEN8_DE_PIPE_IMR for @pipe. */
void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}
232 
/* Mask (disable) the given bits in GEN8_DE_PIPE_IMR for @pipe. */
void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}
238 
239 /**
240  * ibx_display_interrupt_update - update SDEIMR
241  * @dev_priv: driver private
242  * @interrupt_mask: mask of interrupt bits to update
243  * @enabled_irq_mask: mask of interrupt bits to enable
244  */
245 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
246 				  u32 interrupt_mask,
247 				  u32 enabled_irq_mask)
248 {
249 	struct intel_display *display = &dev_priv->display;
250 	u32 sdeimr = intel_de_read(display, SDEIMR);
251 
252 	sdeimr &= ~interrupt_mask;
253 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
254 
255 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
256 
257 	lockdep_assert_held(&dev_priv->irq_lock);
258 
259 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
260 		return;
261 
262 	intel_de_write(display, SDEIMR, sdeimr);
263 	intel_de_posting_read(display, SDEIMR);
264 }
265 
/* Unmask (enable) the given bits in SDEIMR. */
void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}
270 
/* Mask (disable) the given bits in SDEIMR. */
void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}
275 
/*
 * Compute the PIPESTAT enable bits (upper half of the register)
 * corresponding to the currently requested status bits for @pipe.
 * Returns 0 if an unsupported PSR status bit was requested.
 */
u32 i915_pipestat_enable_mask(struct intel_display *display,
			      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 status_mask = display->irq.pipestat_irq_mask[pipe];
	/* By default each enable bit sits 16 positions above its status bit. */
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* The special cases below only apply on display ver >= 5. */
	if (DISPLAY_VER(display) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(display->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(display->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* These enable bits don't follow the simple <<16 relationship. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(display->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
320 
/*
 * Enable the given PIPESTAT status bits for @pipe, updating both the
 * cached software mask and the hardware register.
 * Caller must hold dev_priv->irq_lock.
 */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	struct intel_display *display = &dev_priv->display;
	i915_reg_t reg = PIPESTAT(dev_priv, pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* Already enabled? Then the register is up to date. */
	if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	/* Update the software mask first; the enable mask is derived from it. */
	dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(display, pipe);

	/*
	 * The status bits are written along with the enable bits; see
	 * i9xx_pipestat_irq_ack() for the write-to-clear handling of them.
	 */
	intel_de_write(display, reg, enable_mask | status_mask);
	intel_de_posting_read(display, reg);
}
344 
/*
 * Disable the given PIPESTAT status bits for @pipe, updating both the
 * cached software mask and the hardware register.
 * Caller must hold dev_priv->irq_lock.
 */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	struct intel_display *display = &dev_priv->display;
	i915_reg_t reg = PIPESTAT(dev_priv, pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* Nothing currently enabled from this set? Nothing to do. */
	if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	/* Update the software mask first; the enable mask is derived from it. */
	dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(display, pipe);

	/* Write the remaining enables plus the status bits being retired. */
	intel_de_write(display, reg, enable_mask | status_mask);
	intel_de_posting_read(display, reg);
}
368 
369 static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
370 {
371 	struct drm_i915_private *i915 = to_i915(display->drm);
372 
373 	if (IS_I85X(i915))
374 		return true;
375 
376 	if (IS_PINEVIEW(i915))
377 		return true;
378 
379 	return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
380 }
381 
382 /**
383  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
384  * @dev_priv: i915 device private
385  */
386 void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
387 {
388 	struct intel_display *display = &dev_priv->display;
389 
390 	if (!intel_opregion_asle_present(display))
391 		return;
392 
393 	if (!i915_has_legacy_blc_interrupt(display))
394 		return;
395 
396 	spin_lock_irq(&dev_priv->irq_lock);
397 
398 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
399 	if (DISPLAY_VER(dev_priv) >= 4)
400 		i915_enable_pipestat(dev_priv, PIPE_A,
401 				     PIPE_LEGACY_BLC_EVENT_STATUS);
402 
403 	spin_unlock_irq(&dev_priv->irq_lock);
404 }
405 
#if IS_ENABLED(CONFIG_DEBUG_FS)
/*
 * Feed one frame's CRC values for @pipe into the DRM CRC debugfs
 * machinery, skipping the initial CRC(s) after enabling, which are
 * known to be unreliable.
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
/* Without debugfs there is no CRC ABI to feed; this becomes a no-op. */
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif
449 
450 static void flip_done_handler(struct drm_i915_private *i915,
451 			      enum pipe pipe)
452 {
453 	struct intel_display *display = &i915->display;
454 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
455 
456 	spin_lock(&i915->drm.event_lock);
457 
458 	if (crtc->flip_done_event) {
459 		trace_intel_crtc_flip_done(crtc);
460 		drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
461 		crtc->flip_done_event = NULL;
462 	}
463 
464 	spin_unlock(&i915->drm.event_lock);
465 }
466 
467 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
468 				     enum pipe pipe)
469 {
470 	struct intel_display *display = &dev_priv->display;
471 
472 	display_pipe_crc_irq_handler(dev_priv, pipe,
473 				     intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
474 				     0, 0, 0, 0);
475 }
476 
477 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
478 				     enum pipe pipe)
479 {
480 	struct intel_display *display = &dev_priv->display;
481 
482 	display_pipe_crc_irq_handler(dev_priv, pipe,
483 				     intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
484 				     intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
485 				     intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
486 				     intel_de_read(display, PIPE_CRC_RES_4_IVB(pipe)),
487 				     intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
488 }
489 
490 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
491 				      enum pipe pipe)
492 {
493 	struct intel_display *display = &dev_priv->display;
494 	u32 res1, res2;
495 
496 	if (DISPLAY_VER(dev_priv) >= 3)
497 		res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
498 	else
499 		res1 = 0;
500 
501 	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
502 		res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
503 	else
504 		res2 = 0;
505 
506 	display_pipe_crc_irq_handler(dev_priv, pipe,
507 				     intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)),
508 				     intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
509 				     intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
510 				     res1, res2);
511 }
512 
/*
 * Clear all pending PIPESTAT status bits on every pipe and zero the
 * cached software interrupt masks.
 */
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		/*
		 * Writing the status bits acknowledges them; see the
		 * write-to-clear handling in i9xx_pipestat_irq_ack().
		 */
		intel_de_write(display,
			       PIPESTAT(dev_priv, pipe),
			       PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
	}
}
526 
/*
 * Read and acknowledge the PIPESTAT status bits indicated by @iir for
 * each pipe, storing the acked bits in @pipe_stats for the caller's
 * platform-specific handler to act on.
 */
void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
			   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	struct intel_display *display = &dev_priv->display;
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	/* On VLV/CHV nothing to ack while display IRQs are disabled. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    !dev_priv->display.irq.vlv_display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filterered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		/* Only look at the enabled status bits if this pipe's IIR bit fired. */
		if (iir & iir_bit)
			status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(dev_priv, pipe);
		pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(display, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_de_write(display, reg, pipe_stats[pipe]);
			intel_de_write(display, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}
594 
595 void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
596 			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
597 {
598 	struct intel_display *display = &dev_priv->display;
599 	bool blc_event = false;
600 	enum pipe pipe;
601 
602 	for_each_pipe(dev_priv, pipe) {
603 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
604 			intel_handle_vblank(dev_priv, pipe);
605 
606 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
607 			blc_event = true;
608 
609 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
610 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
611 
612 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
613 			intel_cpu_fifo_underrun_irq_handler(display, pipe);
614 	}
615 
616 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
617 		intel_opregion_asle_intr(display);
618 }
619 
620 void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
621 			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
622 {
623 	struct intel_display *display = &dev_priv->display;
624 	bool blc_event = false;
625 	enum pipe pipe;
626 
627 	for_each_pipe(dev_priv, pipe) {
628 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
629 			intel_handle_vblank(dev_priv, pipe);
630 
631 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
632 			blc_event = true;
633 
634 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
635 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
636 
637 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
638 			intel_cpu_fifo_underrun_irq_handler(display, pipe);
639 	}
640 
641 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
642 		intel_opregion_asle_intr(display);
643 
644 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
645 		intel_gmbus_irq_handler(display);
646 }
647 
648 void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
649 				     u32 pipe_stats[I915_MAX_PIPES])
650 {
651 	struct intel_display *display = &dev_priv->display;
652 	enum pipe pipe;
653 
654 	for_each_pipe(dev_priv, pipe) {
655 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
656 			intel_handle_vblank(dev_priv, pipe);
657 
658 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
659 			flip_done_handler(dev_priv, pipe);
660 
661 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
662 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
663 
664 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
665 			intel_cpu_fifo_underrun_irq_handler(display, pipe);
666 	}
667 
668 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
669 		intel_gmbus_irq_handler(display);
670 }
671 
672 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
673 {
674 	struct intel_display *display = &dev_priv->display;
675 	enum pipe pipe;
676 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
677 
678 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
679 
680 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
681 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
682 			       SDE_AUDIO_POWER_SHIFT);
683 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
684 			port_name(port));
685 	}
686 
687 	if (pch_iir & SDE_AUX_MASK)
688 		intel_dp_aux_irq_handler(display);
689 
690 	if (pch_iir & SDE_GMBUS)
691 		intel_gmbus_irq_handler(display);
692 
693 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
694 		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
695 
696 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
697 		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
698 
699 	if (pch_iir & SDE_POISON)
700 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
701 
702 	if (pch_iir & SDE_FDI_MASK) {
703 		for_each_pipe(dev_priv, pipe)
704 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
705 				pipe_name(pipe),
706 				intel_de_read(display, FDI_RX_IIR(pipe)));
707 	}
708 
709 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
710 		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
711 
712 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
713 		drm_dbg(&dev_priv->drm,
714 			"PCH transcoder CRC error interrupt\n");
715 
716 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
717 		intel_pch_fifo_underrun_irq_handler(display, PIPE_A);
718 
719 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
720 		intel_pch_fifo_underrun_irq_handler(display, PIPE_B);
721 }
722 
723 static u32 ivb_err_int_pipe_fault_mask(enum pipe pipe)
724 {
725 	switch (pipe) {
726 	case PIPE_A:
727 		return ERR_INT_SPRITE_A_FAULT |
728 			ERR_INT_PRIMARY_A_FAULT |
729 			ERR_INT_CURSOR_A_FAULT;
730 	case PIPE_B:
731 		return ERR_INT_SPRITE_B_FAULT |
732 			ERR_INT_PRIMARY_B_FAULT |
733 			ERR_INT_CURSOR_B_FAULT;
734 	case PIPE_C:
735 		return ERR_INT_SPRITE_C_FAULT |
736 			ERR_INT_PRIMARY_C_FAULT |
737 			ERR_INT_CURSOR_C_FAULT;
738 	default:
739 		return 0;
740 	}
741 }
742 
/*
 * Map each IVB/HSW ERR_INT plane fault bit to the plane that reports it.
 * The table is terminated by an empty (fault == 0) sentinel entry.
 */
static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
	{ .fault = ERR_INT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = ERR_INT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = ERR_INT_SPRITE_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};
755 
/*
 * Handle GEN7_ERR_INT: poison, invalid GTT PTE, per-pipe FIFO
 * underruns, pipe CRC done, and plane faults. The register is read
 * once and the observed bits are written back at the end to ack them.
 */
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	u32 err_int = intel_de_read(display, GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	if (err_int & ERR_INT_INVALID_GTT_PTE)
		drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");

	if (err_int & ERR_INT_INVALID_PTE_DATA)
		drm_err_ratelimited(display->drm, "Invalid PTE data\n");

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			/* IVB and HSW use different CRC result registers. */
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}

		fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, ivb_pipe_fault_handlers,
						     pipe, fault_errors);
	}

	/* Write the handled bits back to acknowledge them. */
	intel_de_write(display, GEN7_ERR_INT, err_int);
}
792 
793 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
794 {
795 	struct intel_display *display = &dev_priv->display;
796 	u32 serr_int = intel_de_read(display, SERR_INT);
797 	enum pipe pipe;
798 
799 	if (serr_int & SERR_INT_POISON)
800 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
801 
802 	for_each_pipe(dev_priv, pipe)
803 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
804 			intel_pch_fifo_underrun_irq_handler(display, pipe);
805 
806 	intel_de_write(display, SERR_INT, serr_int);
807 }
808 
/*
 * Handle south display engine (CPT-era PCH) interrupts: hotplug, audio
 * power, AUX, GMBUS, audio content protection, FDI, and the chained
 * SERR_INT error handler.
 */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	struct intel_display *display = &dev_priv->display;
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		intel_dp_aux_irq_handler(display);

	if (pch_iir & SDE_GMBUS_CPT)
		intel_gmbus_irq_handler(display);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_de_read(display, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
846 
847 static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
848 {
849 	switch (pipe) {
850 	case PIPE_A:
851 		return GTT_FAULT_SPRITE_A_FAULT |
852 			GTT_FAULT_PRIMARY_A_FAULT |
853 			GTT_FAULT_CURSOR_A_FAULT;
854 	case PIPE_B:
855 		return GTT_FAULT_SPRITE_B_FAULT |
856 			GTT_FAULT_PRIMARY_B_FAULT |
857 			GTT_FAULT_CURSOR_B_FAULT;
858 	default:
859 		return 0;
860 	}
861 }
862 
/*
 * Map each ILK GTT fault bit to the plane that reports it.
 * The table is terminated by an empty (fault == 0) sentinel entry.
 */
static const struct pipe_fault_handler ilk_pipe_fault_handlers[] = {
	{ .fault = GTT_FAULT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GTT_FAULT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GTT_FAULT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GTT_FAULT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GTT_FAULT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = GTT_FAULT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};
872 
/*
 * Handle ILK_GTT_FAULT: invalid GTT PTE/data errors and per-pipe plane
 * faults. The register is read once and the observed bits are written
 * back immediately to acknowledge them before handling.
 */
static void ilk_gtt_fault_irq_handler(struct intel_display *display)
{
	enum pipe pipe;
	u32 gtt_fault;

	gtt_fault = intel_de_read(display, ILK_GTT_FAULT);
	intel_de_write(display, ILK_GTT_FAULT, gtt_fault);

	if (gtt_fault & GTT_FAULT_INVALID_GTT_PTE)
		drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");

	if (gtt_fault & GTT_FAULT_INVALID_PTE_DATA)
		drm_err_ratelimited(display->drm, "Invalid PTE data\n");

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		fault_errors = gtt_fault & ilk_gtt_fault_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, ilk_pipe_fault_handlers,
						     pipe, fault_errors);
	}
}
896 
/*
 * Top-level display interrupt handler for ILK/SNB (DE IIR bits):
 * hotplug, AUX, ASLE, poison, GTT faults, per-pipe events, chained PCH
 * interrupts, and (on ILK only) the PCU/RPS event.
 */
void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
	struct intel_display *display = &dev_priv->display;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		intel_dp_aux_irq_handler(display);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(display);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	if (de_iir & DE_GTT_FAULT)
		ilk_gtt_fault_irq_handler(display);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_de_read(display, SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		intel_de_write(display, SDEIIR, pch_iir);
	}

	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}
948 
/*
 * Top-level display interrupt handler for IVB/HSW (DE IIR bits):
 * hotplug, the ERR_INT error handler, PSR, AUX, ASLE, per-pipe
 * vblank/flip-done events, and chained CPT PCH interrupts.
 */
void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
	struct intel_display *display = &dev_priv->display;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		struct intel_encoder *encoder;

		/*
		 * Only the first PSR-capable encoder is handled; the loop
		 * reads-and-clears EDP_PSR_IIR and then breaks.
		 */
		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
			u32 psr_iir;

			psr_iir = intel_de_rmw(display, EDP_PSR_IIR, 0, 0);
			intel_psr_irq_handler(intel_dp, psr_iir);
			break;
		}
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		intel_dp_aux_irq_handler(display);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(display);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_de_read(display, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		intel_de_write(display, SDEIIR, pch_iir);
	}
}
998 
999 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
1000 {
1001 	u32 mask;
1002 
1003 	if (DISPLAY_VER(dev_priv) >= 20)
1004 		return 0;
1005 	else if (DISPLAY_VER(dev_priv) >= 14)
1006 		return TGL_DE_PORT_AUX_DDIA |
1007 			TGL_DE_PORT_AUX_DDIB;
1008 	else if (DISPLAY_VER(dev_priv) >= 13)
1009 		return TGL_DE_PORT_AUX_DDIA |
1010 			TGL_DE_PORT_AUX_DDIB |
1011 			TGL_DE_PORT_AUX_DDIC |
1012 			XELPD_DE_PORT_AUX_DDID |
1013 			XELPD_DE_PORT_AUX_DDIE |
1014 			TGL_DE_PORT_AUX_USBC1 |
1015 			TGL_DE_PORT_AUX_USBC2 |
1016 			TGL_DE_PORT_AUX_USBC3 |
1017 			TGL_DE_PORT_AUX_USBC4;
1018 	else if (DISPLAY_VER(dev_priv) >= 12)
1019 		return TGL_DE_PORT_AUX_DDIA |
1020 			TGL_DE_PORT_AUX_DDIB |
1021 			TGL_DE_PORT_AUX_DDIC |
1022 			TGL_DE_PORT_AUX_USBC1 |
1023 			TGL_DE_PORT_AUX_USBC2 |
1024 			TGL_DE_PORT_AUX_USBC3 |
1025 			TGL_DE_PORT_AUX_USBC4 |
1026 			TGL_DE_PORT_AUX_USBC5 |
1027 			TGL_DE_PORT_AUX_USBC6;
1028 
1029 	mask = GEN8_AUX_CHANNEL_A;
1030 	if (DISPLAY_VER(dev_priv) >= 9)
1031 		mask |= GEN9_AUX_CHANNEL_B |
1032 			GEN9_AUX_CHANNEL_C |
1033 			GEN9_AUX_CHANNEL_D;
1034 
1035 	if (DISPLAY_VER(dev_priv) == 11) {
1036 		mask |= ICL_AUX_CHANNEL_F;
1037 		mask |= ICL_AUX_CHANNEL_E;
1038 	}
1039 
1040 	return mask;
1041 }
1042 
1043 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
1044 {
1045 	struct intel_display *display = &dev_priv->display;
1046 
1047 	if (DISPLAY_VER(display) >= 14)
1048 		return MTL_PIPEDMC_ATS_FAULT |
1049 			MTL_PLANE_ATS_FAULT |
1050 			GEN12_PIPEDMC_FAULT |
1051 			GEN9_PIPE_CURSOR_FAULT |
1052 			GEN11_PIPE_PLANE5_FAULT |
1053 			GEN9_PIPE_PLANE4_FAULT |
1054 			GEN9_PIPE_PLANE3_FAULT |
1055 			GEN9_PIPE_PLANE2_FAULT |
1056 			GEN9_PIPE_PLANE1_FAULT;
1057 	else if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
1058 		return GEN12_PIPEDMC_FAULT |
1059 			GEN9_PIPE_CURSOR_FAULT |
1060 			GEN11_PIPE_PLANE5_FAULT |
1061 			GEN9_PIPE_PLANE4_FAULT |
1062 			GEN9_PIPE_PLANE3_FAULT |
1063 			GEN9_PIPE_PLANE2_FAULT |
1064 			GEN9_PIPE_PLANE1_FAULT;
1065 	else if (DISPLAY_VER(display) == 12)
1066 		return GEN12_PIPEDMC_FAULT |
1067 			GEN9_PIPE_CURSOR_FAULT |
1068 			GEN11_PIPE_PLANE7_FAULT |
1069 			GEN11_PIPE_PLANE6_FAULT |
1070 			GEN11_PIPE_PLANE5_FAULT |
1071 			GEN9_PIPE_PLANE4_FAULT |
1072 			GEN9_PIPE_PLANE3_FAULT |
1073 			GEN9_PIPE_PLANE2_FAULT |
1074 			GEN9_PIPE_PLANE1_FAULT;
1075 	else if (DISPLAY_VER(display) == 11)
1076 		return GEN9_PIPE_CURSOR_FAULT |
1077 			GEN11_PIPE_PLANE7_FAULT |
1078 			GEN11_PIPE_PLANE6_FAULT |
1079 			GEN11_PIPE_PLANE5_FAULT |
1080 			GEN9_PIPE_PLANE4_FAULT |
1081 			GEN9_PIPE_PLANE3_FAULT |
1082 			GEN9_PIPE_PLANE2_FAULT |
1083 			GEN9_PIPE_PLANE1_FAULT;
1084 	else if (DISPLAY_VER(display) >= 9)
1085 		return GEN9_PIPE_CURSOR_FAULT |
1086 			GEN9_PIPE_PLANE4_FAULT |
1087 			GEN9_PIPE_PLANE3_FAULT |
1088 			GEN9_PIPE_PLANE2_FAULT |
1089 			GEN9_PIPE_PLANE1_FAULT;
1090 	else
1091 		return GEN8_PIPE_CURSOR_FAULT |
1092 			GEN8_PIPE_SPRITE_FAULT |
1093 			GEN8_PIPE_PRIMARY_FAULT;
1094 }
1095 
1096 static bool handle_plane_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
1097 {
1098 	struct intel_display *display = to_intel_display(crtc);
1099 
1100 	drm_err_ratelimited(display->drm,
1101 			    "[CRTC:%d:%s] PLANE ATS fault\n",
1102 			    crtc->base.base.id, crtc->base.name);
1103 
1104 	return true;
1105 }
1106 
1107 static bool handle_pipedmc_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
1108 {
1109 	struct intel_display *display = to_intel_display(crtc);
1110 
1111 	drm_err_ratelimited(display->drm,
1112 			    "[CRTC:%d:%s] PIPEDMC ATS fault\n",
1113 			    crtc->base.base.id, crtc->base.name);
1114 
1115 	return true;
1116 }
1117 
1118 static bool handle_pipedmc_fault(struct intel_crtc *crtc, enum plane_id plane_id)
1119 {
1120 	struct intel_display *display = to_intel_display(crtc);
1121 
1122 	drm_err_ratelimited(display->drm,
1123 			    "[CRTC:%d:%s] PIPEDMC fault\n",
1124 			    crtc->base.base.id, crtc->base.name);
1125 
1126 	return true;
1127 }
1128 
/* Pipe fault bit -> handler mapping for display version 14+ (MTL and later). */
static const struct pipe_fault_handler mtl_pipe_fault_handlers[] = {
	{ .fault = MTL_PLANE_ATS_FAULT,     .handle = handle_plane_ats_fault, },
	{ .fault = MTL_PIPEDMC_ATS_FAULT,   .handle = handle_pipedmc_ats_fault, },
	{ .fault = GEN12_PIPEDMC_FAULT,     .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{} /* sentinel */
};
1141 
/* Pipe fault bit -> handler mapping for display version 12-13 (TGL/ADL). */
static const struct pipe_fault_handler tgl_pipe_fault_handlers[] = {
	{ .fault = GEN12_PIPEDMC_FAULT,     .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{} /* sentinel */
};
1154 
/* Pipe fault bit -> handler mapping for display version 11 (ICL/JSL). */
static const struct pipe_fault_handler icl_pipe_fault_handlers[] = {
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{} /* sentinel */
};
1166 
/* Pipe fault bit -> handler mapping for display version 9-10 (SKL/BXT/GLK/CNL). */
static const struct pipe_fault_handler skl_pipe_fault_handlers[] = {
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{} /* sentinel */
};
1175 
/* Pipe fault bit -> handler mapping for display version 8 (BDW/CHV-era gen8). */
static const struct pipe_fault_handler bdw_pipe_fault_handlers[] = {
	{ .fault = GEN8_PIPE_SPRITE_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GEN8_PIPE_PRIMARY_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GEN8_PIPE_CURSOR_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{} /* sentinel */
};
1182 
1183 static const struct pipe_fault_handler *
1184 gen8_pipe_fault_handlers(struct intel_display *display)
1185 {
1186 	if (DISPLAY_VER(display) >= 14)
1187 		return mtl_pipe_fault_handlers;
1188 	else if (DISPLAY_VER(display) >= 12)
1189 		return tgl_pipe_fault_handlers;
1190 	else if (DISPLAY_VER(display) >= 11)
1191 		return icl_pipe_fault_handlers;
1192 	else if (DISPLAY_VER(display) >= 9)
1193 		return skl_pipe_fault_handlers;
1194 	else
1195 		return bdw_pipe_fault_handlers;
1196 }
1197 
1198 static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
1199 {
1200 	wake_up_all(&dev_priv->display.pmdemand.waitqueue);
1201 }
1202 
/*
 * Handle DE MISC interrupts (already acked by the caller): DBuf overlap,
 * pmdemand responses/timeouts, register-access timeouts, opregion GSE,
 * and PSR events. Warns if none of the latched bits were recognized.
 */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	struct intel_display *display = &dev_priv->display;
	bool found = false;

	if (HAS_DBUF_OVERLAP_DETECTION(display)) {
		if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) {
			drm_warn(display->drm,  "DBuf overlap detected\n");
			found = true;
		}
	}

	if (DISPLAY_VER(dev_priv) >= 14) {
		if (iir & (XELPDP_PMDEMAND_RSP |
			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
				drm_dbg(&dev_priv->drm,
					"Error waiting for Punit PM Demand Response\n");

			/* wake pmdemand waiters on both success and timeout */
			intel_pmdemand_irq_handler(dev_priv);
			found = true;
		}

		if (iir & XELPDP_RM_TIMEOUT) {
			u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
			drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
			found = true;
		}
	} else if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(display);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			/* gen12+ has a PSR IIR per transcoder; older has one for eDP */
			if (DISPLAY_VER(dev_priv) >= 12)
				iir_reg = TRANS_PSR_IIR(dev_priv,
						        intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			/* read-modify-write with 0/0 acks the latched PSR bits */
			psr_iir = intel_de_rmw(display, iir_reg, 0, 0);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior GEN12 only have one EDP PSR */
			if (DISPLAY_VER(dev_priv) < 12)
				break;
		}
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}
1267 
1268 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
1269 					   u32 te_trigger)
1270 {
1271 	struct intel_display *display = &dev_priv->display;
1272 	enum pipe pipe = INVALID_PIPE;
1273 	enum transcoder dsi_trans;
1274 	enum port port;
1275 	u32 val;
1276 
1277 	/*
1278 	 * Incase of dual link, TE comes from DSI_1
1279 	 * this is to check if dual link is enabled
1280 	 */
1281 	val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
1282 	val &= PORT_SYNC_MODE_ENABLE;
1283 
1284 	/*
1285 	 * if dual link is enabled, then read DSI_0
1286 	 * transcoder registers
1287 	 */
1288 	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
1289 						  PORT_A : PORT_B;
1290 	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
1291 
1292 	/* Check if DSI configured in command mode */
1293 	val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
1294 	val = val & OP_MODE_MASK;
1295 
1296 	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
1297 		drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
1298 		return;
1299 	}
1300 
1301 	/* Get PIPE for handling VBLANK event */
1302 	val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
1303 	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
1304 	case TRANS_DDI_EDP_INPUT_A_ON:
1305 		pipe = PIPE_A;
1306 		break;
1307 	case TRANS_DDI_EDP_INPUT_B_ONOFF:
1308 		pipe = PIPE_B;
1309 		break;
1310 	case TRANS_DDI_EDP_INPUT_C_ONOFF:
1311 		pipe = PIPE_C;
1312 		break;
1313 	default:
1314 		drm_err(&dev_priv->drm, "Invalid PIPE\n");
1315 		return;
1316 	}
1317 
1318 	intel_handle_vblank(dev_priv, pipe);
1319 
1320 	/* clear TE in dsi IIR */
1321 	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
1322 	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
1323 }
1324 
1325 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
1326 {
1327 	if (DISPLAY_VER(i915) >= 9)
1328 		return GEN9_PIPE_PLANE1_FLIP_DONE;
1329 	else
1330 		return GEN8_PIPE_PRIMARY_FLIP_DONE;
1331 }
1332 
/*
 * Read and ack the PCH (SDE) interrupts, and, on MTL+, the PICA
 * interrupts routed through SDE_PICAINTERRUPT. Returns the latched
 * bits via *pch_iir / *pica_iir; both are 0 if nothing was pending.
 */
static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
{
	struct intel_display *display = &i915->display;
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(display, SDEIIR);
	if (!*pch_iir)
		return;

	/*
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);

		pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
		intel_de_write(display, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(display, SDEIIR, *pch_iir);

	/* re-enable PICA interrupts only if we disabled them above */
	if (pica_ier)
		intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}
1361 
/*
 * Top-level gen8+ display engine interrupt handler. Dispatches MISC,
 * HPD (gen11+), PORT, per-pipe, and PCH interrupts based on the already
 * read master control value. Each category's IIR is read and acked
 * before its events are handled.
 */
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	struct intel_display *display = &dev_priv->display;
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_de_read(display, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_de_write(display, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_de_read(display, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_de_write(display, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_de_read(display, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			/* ack before handling to avoid losing edges */
			intel_de_write(display, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				intel_dp_aux_irq_handler(display);
				found = true;
			}

			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(display);
				found = true;
			}

			if (DISPLAY_VER(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err_ratelimited(&dev_priv->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		/* ack before handling to avoid losing edges */
		intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
			flip_done_handler(dev_priv, pipe);

		if (HAS_DSB(dev_priv)) {
			if (iir & GEN12_DSB_INT(INTEL_DSB_0))
				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);

			if (iir & GEN12_DSB_INT(INTEL_DSB_1))
				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);

			if (iir & GEN12_DSB_INT(INTEL_DSB_2))
				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
		}

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display,
						     gen8_pipe_fault_handlers(display),
						     pipe, fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		u32 pica_iir;

		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
		if (iir) {
			if (pica_iir)
				xelpdp_pica_irq_handler(dev_priv, pica_iir);

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}
}
1519 
/*
 * Read and ack GU MISC interrupts if the master control reports any.
 * Returns the latched IIR bits (0 if none) for gen11_gu_misc_irq_handler().
 */
u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
	struct intel_display *display = &i915->display;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = intel_de_read(display, GEN11_GU_MISC_IIR);
	if (likely(iir))
		intel_de_write(display, GEN11_GU_MISC_IIR, iir);

	return iir;
}
1534 
1535 void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
1536 {
1537 	struct intel_display *display = &i915->display;
1538 
1539 	if (iir & GEN11_GU_MISC_GSE)
1540 		intel_opregion_asle_intr(display);
1541 }
1542 
/*
 * Gen11+ display interrupt entry point: display interrupts are disabled
 * via GEN11_DISPLAY_INT_CTL while the gen8 DE handler runs, then
 * re-enabled afterwards.
 */
void gen11_display_irq_handler(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;
	u32 disp_ctl;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
	gen8_de_irq_handler(i915, disp_ctl);
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
1561 
1562 static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
1563 {
1564 	struct intel_display *display = &i915->display;
1565 	lockdep_assert_held(&i915->drm.vblank_time_lock);
1566 
1567 	/*
1568 	 * Vblank/CRC interrupts fail to wake the device up from C2+.
1569 	 * Disabling render clock gating during C-states avoids
1570 	 * the problem. There is a small power cost so we do this
1571 	 * only when vblank/CRC interrupts are actually enabled.
1572 	 */
1573 	if (i915->display.irq.vblank_enabled++ == 0)
1574 		intel_de_write(display, SCPD0,
1575 			       _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
1576 }
1577 
1578 static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
1579 {
1580 	struct intel_display *display = &i915->display;
1581 	lockdep_assert_held(&i915->drm.vblank_time_lock);
1582 
1583 	if (--i915->display.irq.vblank_enabled == 0)
1584 		intel_de_write(display, SCPD0,
1585 			       _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
1586 }
1587 
1588 void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
1589 {
1590 	spin_lock_irq(&i915->drm.vblank_time_lock);
1591 
1592 	if (enable)
1593 		i915gm_irq_cstate_wa_enable(i915);
1594 	else
1595 		i915gm_irq_cstate_wa_disable(i915);
1596 
1597 	spin_unlock_irq(&i915->drm.vblank_time_lock);
1598 }
1599 
1600 int i8xx_enable_vblank(struct drm_crtc *crtc)
1601 {
1602 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1603 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1604 	unsigned long irqflags;
1605 
1606 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1607 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1608 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1609 
1610 	return 0;
1611 }
1612 
1613 void i8xx_disable_vblank(struct drm_crtc *crtc)
1614 {
1615 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1616 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1617 	unsigned long irqflags;
1618 
1619 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1620 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1621 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1622 }
1623 
1624 int i915gm_enable_vblank(struct drm_crtc *crtc)
1625 {
1626 	struct drm_i915_private *i915 = to_i915(crtc->dev);
1627 
1628 	i915gm_irq_cstate_wa_enable(i915);
1629 
1630 	return i8xx_enable_vblank(crtc);
1631 }
1632 
1633 void i915gm_disable_vblank(struct drm_crtc *crtc)
1634 {
1635 	struct drm_i915_private *i915 = to_i915(crtc->dev);
1636 
1637 	i8xx_disable_vblank(crtc);
1638 
1639 	i915gm_irq_cstate_wa_disable(i915);
1640 }
1641 
1642 int i965_enable_vblank(struct drm_crtc *crtc)
1643 {
1644 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1645 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1646 	unsigned long irqflags;
1647 
1648 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1649 	i915_enable_pipestat(dev_priv, pipe,
1650 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
1651 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1652 
1653 	return 0;
1654 }
1655 
1656 void i965_disable_vblank(struct drm_crtc *crtc)
1657 {
1658 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1659 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1660 	unsigned long irqflags;
1661 
1662 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1663 	i915_disable_pipestat(dev_priv, pipe,
1664 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
1665 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1666 }
1667 
1668 int ilk_enable_vblank(struct drm_crtc *crtc)
1669 {
1670 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1671 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1672 	unsigned long irqflags;
1673 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
1674 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
1675 
1676 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1677 	ilk_enable_display_irq(dev_priv, bit);
1678 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1679 
1680 	/* Even though there is no DMC, frame counter can get stuck when
1681 	 * PSR is active as no frames are generated.
1682 	 */
1683 	if (HAS_PSR(dev_priv))
1684 		drm_crtc_vblank_restore(crtc);
1685 
1686 	return 0;
1687 }
1688 
1689 void ilk_disable_vblank(struct drm_crtc *crtc)
1690 {
1691 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1692 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1693 	unsigned long irqflags;
1694 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
1695 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
1696 
1697 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1698 	ilk_disable_display_irq(dev_priv, bit);
1699 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1700 }
1701 
1702 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
1703 				   bool enable)
1704 {
1705 	struct intel_display *display = to_intel_display(intel_crtc);
1706 	enum port port;
1707 
1708 	if (!(intel_crtc->mode_flags &
1709 	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
1710 		return false;
1711 
1712 	/* for dual link cases we consider TE from slave */
1713 	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
1714 		port = PORT_B;
1715 	else
1716 		port = PORT_A;
1717 
1718 	intel_de_rmw(display, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, enable ? 0 : DSI_TE_EVENT);
1719 
1720 	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
1721 
1722 	return true;
1723 }
1724 
1725 static void intel_display_vblank_dc_work(struct work_struct *work)
1726 {
1727 	struct intel_display *display =
1728 		container_of(work, typeof(*display), irq.vblank_dc_work);
1729 	int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
1730 
1731 	/*
1732 	 * NOTE: intel_display_power_set_target_dc_state is used only by PSR
1733 	 * code for DC3CO handling. DC3CO target state is currently disabled in
1734 	 * PSR code. If DC3CO is taken into use we need take that into account
1735 	 * here as well.
1736 	 */
1737 	intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
1738 						DC_STATE_EN_UPTO_DC6);
1739 }
1740 
1741 int bdw_enable_vblank(struct drm_crtc *_crtc)
1742 {
1743 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
1744 	struct intel_display *display = to_intel_display(crtc);
1745 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1746 	enum pipe pipe = crtc->pipe;
1747 	unsigned long irqflags;
1748 
1749 	if (gen11_dsi_configure_te(crtc, true))
1750 		return 0;
1751 
1752 	if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
1753 		schedule_work(&display->irq.vblank_dc_work);
1754 
1755 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1756 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
1757 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1758 
1759 	/* Even if there is no DMC, frame counter can get stuck when
1760 	 * PSR is active as no frames are generated, so check only for PSR.
1761 	 */
1762 	if (HAS_PSR(dev_priv))
1763 		drm_crtc_vblank_restore(&crtc->base);
1764 
1765 	return 0;
1766 }
1767 
1768 void bdw_disable_vblank(struct drm_crtc *_crtc)
1769 {
1770 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
1771 	struct intel_display *display = to_intel_display(crtc);
1772 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1773 	enum pipe pipe = crtc->pipe;
1774 	unsigned long irqflags;
1775 
1776 	if (gen11_dsi_configure_te(crtc, false))
1777 		return;
1778 
1779 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1780 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
1781 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1782 
1783 	if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
1784 		schedule_work(&display->irq.vblank_dc_work);
1785 }
1786 
1787 static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
1788 {
1789 	switch (pipe) {
1790 	case PIPE_A:
1791 		return SPRITEB_INVALID_GTT_STATUS |
1792 			SPRITEA_INVALID_GTT_STATUS |
1793 			PLANEA_INVALID_GTT_STATUS |
1794 			CURSORA_INVALID_GTT_STATUS;
1795 	case PIPE_B:
1796 		return SPRITED_INVALID_GTT_STATUS |
1797 			SPRITEC_INVALID_GTT_STATUS |
1798 			PLANEB_INVALID_GTT_STATUS |
1799 			CURSORB_INVALID_GTT_STATUS;
1800 	case PIPE_C:
1801 		return SPRITEF_INVALID_GTT_STATUS |
1802 			SPRITEE_INVALID_GTT_STATUS |
1803 			PLANEC_INVALID_GTT_STATUS |
1804 			CURSORC_INVALID_GTT_STATUS;
1805 	default:
1806 		return 0;
1807 	}
1808 }
1809 
/*
 * DPINVGTT status bit -> handler mapping for VLV/CHV. The handler list
 * is shared by all pipes; vlv_dpinvgtt_pipe_fault_mask() pre-filters the
 * bits per pipe before lookup.
 */
static const struct pipe_fault_handler vlv_pipe_fault_handlers[] = {
	{ .fault = SPRITEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEA_INVALID_GTT_STATUS,  .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR,  },
	{ .fault = SPRITED_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEB_INVALID_GTT_STATUS,  .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR,  },
	{ .fault = SPRITEF_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEE_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEC_INVALID_GTT_STATUS,  .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR,  },
	{} /* sentinel */
};
1825 
/*
 * Ack DPINVGTT page table errors. Returns the newly seen (and still
 * enabled) status bits via *dpinvgtt, and disables reporting of those
 * bits from now on (see comment below for why).
 */
static void vlv_page_table_error_irq_ack(struct intel_display *display, u32 *dpinvgtt)
{
	u32 status, enable, tmp;

	tmp = intel_de_read(display, DPINVGTT);

	/* upper 16 bits are the enable mask, lower 16 the latched status */
	enable = tmp >> 16;
	status = tmp & 0xffff;

	/*
	 * Despite what the docs claim, the status bits seem to get
	 * stuck permanently (similar the old PGTBL_ER register), so
	 * we have to disable and ignore them once set. They do get
	 * reset if the display power well goes down, so no need to
	 * track the enable mask explicitly.
	 */
	*dpinvgtt = status & enable;
	enable &= ~status;

	/* customary ack+disable then re-enable to guarantee an edge */
	intel_de_write(display, DPINVGTT, status);
	intel_de_write(display, DPINVGTT, enable << 16);
}
1849 
1850 static void vlv_page_table_error_irq_handler(struct intel_display *display, u32 dpinvgtt)
1851 {
1852 	enum pipe pipe;
1853 
1854 	for_each_pipe(display, pipe) {
1855 		u32 fault_errors;
1856 
1857 		fault_errors = dpinvgtt & vlv_dpinvgtt_pipe_fault_mask(pipe);
1858 		if (fault_errors)
1859 			intel_pipe_fault_irq_handler(display, vlv_pipe_fault_handlers,
1860 						     pipe, fault_errors);
1861 	}
1862 }
1863 
/*
 * Read and ack the VLV/CHV display error interrupts. Returns the latched
 * EIR bits via *eir; *dpinvgtt is filled in only when a page table error
 * was pending.
 */
void vlv_display_error_irq_ack(struct intel_display *display,
			       u32 *eir, u32 *dpinvgtt)
{
	u32 emr;

	*eir = intel_de_read(display, VLV_EIR);

	if (*eir & VLV_ERROR_PAGE_TABLE)
		vlv_page_table_error_irq_ack(display, dpinvgtt);

	intel_de_write(display, VLV_EIR, *eir);

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits.
	 */
	emr = intel_de_read(display, VLV_EMR);
	intel_de_write(display, VLV_EMR, 0xffffffff);
	intel_de_write(display, VLV_EMR, emr);
}
1885 
1886 void vlv_display_error_irq_handler(struct intel_display *display,
1887 				   u32 eir, u32 dpinvgtt)
1888 {
1889 	drm_dbg(display->drm, "Master Error, EIR 0x%08x\n", eir);
1890 
1891 	if (eir & VLV_ERROR_PAGE_TABLE)
1892 		vlv_page_table_error_irq_handler(display, dpinvgtt);
1893 }
1894 
/*
 * Reset all VLV/CHV display interrupt state: page table fault status,
 * error registers, hotplug, pipestat and the main display IRQ registers.
 */
static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	/* clear (and leave disabled) any latched page table fault status */
	if (IS_CHERRYVIEW(dev_priv))
		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	gen2_error_reset(to_intel_uncore(display->drm),
			 VLV_ERROR_REGS);

	/* disable hotplug detection and ack any pending hotplug status */
	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
	/* ~0u marks the mask as uninstalled (see WARN in postinstall) */
	dev_priv->irq_mask = ~0u;
}
1915 
1916 void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
1917 {
1918 	if (dev_priv->display.irq.vlv_display_irqs_enabled)
1919 		_vlv_display_irq_reset(dev_priv);
1920 }
1921 
/*
 * Reset display interrupt state on pre-VLV i9xx platforms: disable
 * hotplug detection (where present) and clear pipestat state.
 */
void i9xx_display_irq_reset(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;

	if (I915_HAS_HOTPLUG(i915)) {
		i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
		/* rmw with no change acks any latched hotplug status */
		intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0);
	}

	i9xx_pipestat_irq_reset(i915);
}
1933 
1934 static u32 vlv_error_mask(void)
1935 {
1936 	/* TODO enable other errors too? */
1937 	return VLV_ERROR_PAGE_TABLE;
1938 }
1939 
/*
 * Program the VLV/CHV display interrupt registers: page table fault
 * reporting, error registers, pipestat sources and the main IMR/IER.
 * No-op unless display irqs have been marked enabled.
 */
void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	if (!dev_priv->display.irq.vlv_display_irqs_enabled)
		return;

	/* clear stale fault status and enable page table fault reporting */
	if (IS_CHERRYVIEW(dev_priv))
		intel_de_write(display, DPINVGTT,
			       DPINVGTT_STATUS_MASK_CHV |
			       DPINVGTT_EN_MASK_CHV);
	else
		intel_de_write(display, DPINVGTT,
			       DPINVGTT_STATUS_MASK_VLV |
			       DPINVGTT_EN_MASK_VLV);

	gen2_error_init(to_intel_uncore(display->drm),
			VLV_ERROR_REGS, ~vlv_error_mask());

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* GMBUS is routed through pipe A's pipestat register */
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT;

	/* CHV has a third pipe */
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* catch double postinstall: reset leaves irq_mask == ~0u */
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}
1985 
/*
 * Disable and clear all gen8/9 display engine interrupt registers:
 * PSR, per-pipe (only those whose power well is up), port and misc.
 */
void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* mask and ack all PSR interrupts */
	intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
	intel_de_write(display, EDP_PSR_IIR, 0xffffffff);

	/* skip pipes whose power well is down - their registers aren't usable */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
}
2005 
/*
 * Disable and clear all gen11+ display interrupt registers: master
 * display enable, PSR, per-pipe, port, misc, HPD/PICA and south display.
 */
void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* turn off the master display interrupt before touching the rest */
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		/* gen12+ has per-transcoder PSR registers */
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			/* powered-down transcoders can't be accessed */
			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(display, domain))
				continue;

			intel_de_write(display,
				       TRANS_PSR_IMR(dev_priv, trans),
				       0xffffffff);
			intel_de_write(display,
				       TRANS_PSR_IIR(dev_priv, trans),
				       0xffffffff);
		}
	} else {
		intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
		intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
	}

	/* skip pipes whose power well is down */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);

	/* hotplug moved to the PICA block on display 14+ */
	if (DISPLAY_VER(dev_priv) >= 14)
		intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
	else
		intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}
2056 
/*
 * Re-program the per-pipe DE interrupt registers for the pipes in
 * @pipe_mask after their power well has been enabled, restoring the
 * saved per-pipe IMR and enabling vblank/underrun/flip-done sources.
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_display *display = &dev_priv->display;
	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(dev_priv);
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	/* nothing to restore if device interrupts are not installed */
	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
					    dev_priv->display.irq.de_irq_mask[pipe],
					    ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}
2079 
/*
 * Disable and clear the per-pipe DE interrupt registers for the pipes
 * in @pipe_mask before their power well is turned off, and wait for
 * any in-flight interrupt handling to finish.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_display *display = &dev_priv->display;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}
2101 
2102 /*
2103  * SDEIER is also touched by the interrupt handler to work around missed PCH
2104  * interrupts. Hence we can't update it after the interrupt handler is enabled -
2105  * instead we unconditionally enable all PCH interrupt sources here, but then
2106  * only unmask them as needed with SDEIMR.
2107  *
2108  * Note that we currently do this after installing the interrupt handler,
2109  * but before we enable the master interrupt. That should be sufficient
2110  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
2111  * interrupts could still race.
2112  */
2113 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
2114 {
2115 	struct intel_display *display = &dev_priv->display;
2116 	u32 mask;
2117 
2118 	if (HAS_PCH_NOP(dev_priv))
2119 		return;
2120 
2121 	if (HAS_PCH_IBX(dev_priv))
2122 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
2123 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2124 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
2125 	else
2126 		mask = SDE_GMBUS_CPT;
2127 
2128 	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
2129 }
2130 
/*
 * Mark VLV/CHV display interrupts as enabled and, if device interrupts
 * are already installed, reset and program the display irq registers.
 * Caller must hold dev_priv->irq_lock.
 */
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display.irq.vlv_display_irqs_enabled)
		return;

	/* set the flag first - vlv_display_irq_postinstall() checks it */
	dev_priv->display.irq.vlv_display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		_vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}
2145 
/*
 * Mark VLV/CHV display interrupts as disabled and, if device interrupts
 * are installed, reset the display irq registers.
 * Caller must hold dev_priv->irq_lock.
 */
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display.irq.vlv_display_irqs_enabled)
		return;

	dev_priv->display.irq.vlv_display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		_vlv_display_irq_reset(dev_priv);
}
2158 
/*
 * Program the ILK-HSW display engine interrupt registers (plus the
 * south display via ibx_irq_postinstall()).
 */
void ilk_de_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;
	/* display_mask bits are unmasked in IMR; extra_mask only enabled in IER */
	u32 display_mask, extra_mask;

	if (DISPLAY_VER(i915) >= 7) {
		/* IVB/HSW interrupt bit layout */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		/* ILK/SNB interrupt bit layout */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE |
				DE_PCH_EVENT | DE_GTT_FAULT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(i915)) {
		/* PSR IIR must be clean before unmasking the PSR interrupt */
		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(i915))
		extra_mask |= DE_PCU_EVENT;

	i915->irq_mask = ~display_mask;

	/* south display interrupts before the main DE registers */
	ibx_irq_postinstall(i915);

	intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
				    display_mask | extra_mask);
}
2200 
2201 static void mtp_irq_postinstall(struct drm_i915_private *i915);
2202 static void icp_irq_postinstall(struct drm_i915_private *i915);
2203 
/*
 * Program all gen8+ display engine interrupt registers: south display
 * (PCH or PICA), per-pipe, port, misc and (display 11-13) HPD.
 */
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	/* *_masked bits are unmasked in IMR; *_enables are enabled in IER */
	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* south display interrupts first (see comment at ibx_irq_postinstall()) */
	if (DISPLAY_VER(dev_priv) >= 14)
		mtp_irq_postinstall(dev_priv);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	if (DISPLAY_VER(dev_priv) < 11)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 14) {
		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
				  XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		/* DSI TE interrupts only when the VBT declares a DSI panel */
		if (intel_bios_is_dsi_present(display, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	if (HAS_DBUF_OVERLAP_DETECTION(display))
		de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;

	if (HAS_DSB(dev_priv))
		de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
			GEN12_DSB_INT(INTEL_DSB_1) |
			GEN12_DSB_INT(INTEL_DSB_2);

	/* vblank/underrun/flip-done are enabled here but stay masked in IMR */
	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	/* the PSR IIR must be clean before unmasking PSR interrupts */
	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			/* powered-down transcoders can't be accessed */
			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(display, domain))
				continue;

			intel_display_irq_regs_assert_irr_is_zero(display,
								  TRANS_PSR_IIR(dev_priv, trans));
		}
	} else {
		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;

		/* only program pipes whose power well is up */
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
						    dev_priv->display.irq.de_irq_mask[pipe],
						    de_pipe_enables);
	}

	intel_display_irq_regs_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked,
				    de_port_enables);
	intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
				    de_misc_masked);

	if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
		/* TC/TBT hotplug: enabled in IER but fully masked in IMR */
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		intel_display_irq_regs_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
					    de_hpd_enables);
	}
}
2303 
2304 static void mtp_irq_postinstall(struct drm_i915_private *i915)
2305 {
2306 	struct intel_display *display = &i915->display;
2307 	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
2308 	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
2309 	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
2310 			     XELPDP_TBT_HOTPLUG_MASK;
2311 
2312 	intel_display_irq_regs_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
2313 				    de_hpd_enables);
2314 
2315 	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
2316 }
2317 
2318 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
2319 {
2320 	struct intel_display *display = &dev_priv->display;
2321 	u32 mask = SDE_GMBUS_ICP;
2322 
2323 	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
2324 }
2325 
/*
 * Gen11+: program all display engine interrupt registers, then flip the
 * master display interrupt enable.
 */
void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
2337 
/*
 * DG1+: program all display engine interrupt registers, then flip the
 * master display interrupt enable.
 */
void dg1_de_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;

	if (!HAS_DISPLAY(i915))
		return;

	gen8_de_irq_postinstall(i915);
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
2348 
/* One-time display interrupt setup, independent of hardware state. */
void intel_display_irq_init(struct drm_i915_private *i915)
{
	/* let DRM disable vblank irqs immediately when the last user is gone */
	i915->drm.vblank_disable_immediate = true;

	intel_hotplug_irq_init(i915);

	INIT_WORK(&i915->display.irq.vblank_dc_work,
		  intel_display_vblank_dc_work);
}
2358