xref: /linux/drivers/gpu/drm/i915/display/intel_display_irq.c (revision c156ef573efe4230ef3dc1ff2ec0038fe0eb217f)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include <drm/drm_vblank.h>
7 
8 #include "gt/intel_rps.h"
9 #include "i915_drv.h"
10 #include "i915_irq.h"
11 #include "i915_reg.h"
12 #include "icl_dsi_regs.h"
13 #include "intel_crtc.h"
14 #include "intel_de.h"
15 #include "intel_display_irq.h"
16 #include "intel_display_trace.h"
17 #include "intel_display_types.h"
18 #include "intel_dmc_wl.h"
19 #include "intel_dp_aux.h"
20 #include "intel_dsb.h"
21 #include "intel_fdi_regs.h"
22 #include "intel_fifo_underrun.h"
23 #include "intel_gmbus.h"
24 #include "intel_hotplug_irq.h"
25 #include "intel_pipe_crc_regs.h"
26 #include "intel_pmdemand.h"
27 #include "intel_psr.h"
28 #include "intel_psr_regs.h"
29 #include "intel_uncore.h"
30 
31 static void
32 intel_display_irq_regs_init(struct intel_display *display, struct i915_irq_regs regs,
33 			    u32 imr_val, u32 ier_val)
34 {
35 	intel_dmc_wl_get(display, regs.imr);
36 	intel_dmc_wl_get(display, regs.ier);
37 	intel_dmc_wl_get(display, regs.iir);
38 
39 	gen2_irq_init(to_intel_uncore(display->drm), regs, imr_val, ier_val);
40 
41 	intel_dmc_wl_put(display, regs.iir);
42 	intel_dmc_wl_put(display, regs.ier);
43 	intel_dmc_wl_put(display, regs.imr);
44 }
45 
46 static void
47 intel_display_irq_regs_reset(struct intel_display *display, struct i915_irq_regs regs)
48 {
49 	intel_dmc_wl_get(display, regs.imr);
50 	intel_dmc_wl_get(display, regs.ier);
51 	intel_dmc_wl_get(display, regs.iir);
52 
53 	gen2_irq_reset(to_intel_uncore(display->drm), regs);
54 
55 	intel_dmc_wl_put(display, regs.iir);
56 	intel_dmc_wl_put(display, regs.ier);
57 	intel_dmc_wl_put(display, regs.imr);
58 }
59 
60 static void
61 intel_display_irq_regs_assert_irr_is_zero(struct intel_display *display, i915_reg_t reg)
62 {
63 	intel_dmc_wl_get(display, reg);
64 
65 	gen2_assert_iir_is_zero(to_intel_uncore(display->drm), reg);
66 
67 	intel_dmc_wl_put(display, reg);
68 }
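
/*
 * Note on the three wrappers above: they bracket the low level gen2_irq_*()
 * helpers with intel_dmc_wl_get()/intel_dmc_wl_put() on each register they
 * touch, so a DMC wakelock reference is held while the IMR/IER/IIR registers
 * are accessed. This only summarizes what the code above already does; see
 * intel_dmc_wl.c for the actual wakelock semantics.
 */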
69 
70 static void
71 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
72 {
73 	struct intel_display *display = &dev_priv->display;
74 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
75 
76 	drm_crtc_handle_vblank(&crtc->base);
77 }
78 
79 /**
80  * ilk_update_display_irq - update DEIMR
81  * @dev_priv: driver private
82  * @interrupt_mask: mask of interrupt bits to update
83  * @enabled_irq_mask: mask of interrupt bits to enable
84  */
85 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
86 			    u32 interrupt_mask, u32 enabled_irq_mask)
87 {
88 	struct intel_display *display = &dev_priv->display;
89 	u32 new_val;
90 
91 	lockdep_assert_held(&dev_priv->irq_lock);
92 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
93 
94 	new_val = dev_priv->irq_mask;
95 	new_val &= ~interrupt_mask;
96 	new_val |= (~enabled_irq_mask & interrupt_mask);
97 
98 	if (new_val != dev_priv->irq_mask &&
99 	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
100 		dev_priv->irq_mask = new_val;
101 		intel_de_write(display, DEIMR, dev_priv->irq_mask);
102 		intel_de_posting_read(display, DEIMR);
103 	}
104 }
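
/*
 * Worked example of the mask arithmetic above (illustrative bit choice only):
 * ilk_update_display_irq(dev_priv, DE_AUX_CHANNEL_A, DE_AUX_CHANNEL_A) clears
 * the bit in DEIMR, i.e. unmasks the AUX A interrupt, while
 * ilk_update_display_irq(dev_priv, DE_AUX_CHANNEL_A, 0) sets it again, i.e.
 * masks it. Bits outside interrupt_mask are never touched. The
 * ilk_enable_display_irq()/ilk_disable_display_irq() helpers below are
 * shorthands for exactly these two calls.
 */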
105 
106 void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
107 {
108 	ilk_update_display_irq(i915, bits, bits);
109 }
110 
111 void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
112 {
113 	ilk_update_display_irq(i915, bits, 0);
114 }
115 
116 /**
117  * bdw_update_port_irq - update DE port interrupt
118  * @dev_priv: driver private
119  * @interrupt_mask: mask of interrupt bits to update
120  * @enabled_irq_mask: mask of interrupt bits to enable
121  */
122 void bdw_update_port_irq(struct drm_i915_private *dev_priv,
123 			 u32 interrupt_mask, u32 enabled_irq_mask)
124 {
125 	struct intel_display *display = &dev_priv->display;
126 	u32 new_val;
127 	u32 old_val;
128 
129 	lockdep_assert_held(&dev_priv->irq_lock);
130 
131 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
132 
133 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
134 		return;
135 
136 	old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
137 
138 	new_val = old_val;
139 	new_val &= ~interrupt_mask;
140 	new_val |= (~enabled_irq_mask & interrupt_mask);
141 
142 	if (new_val != old_val) {
143 		intel_de_write(display, GEN8_DE_PORT_IMR, new_val);
144 		intel_de_posting_read(display, GEN8_DE_PORT_IMR);
145 	}
146 }
147 
148 /**
149  * bdw_update_pipe_irq - update DE pipe interrupt
150  * @dev_priv: driver private
151  * @pipe: pipe whose interrupt to update
152  * @interrupt_mask: mask of interrupt bits to update
153  * @enabled_irq_mask: mask of interrupt bits to enable
154  */
155 static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
156 				enum pipe pipe, u32 interrupt_mask,
157 				u32 enabled_irq_mask)
158 {
159 	struct intel_display *display = &dev_priv->display;
160 	u32 new_val;
161 
162 	lockdep_assert_held(&dev_priv->irq_lock);
163 
164 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
165 
166 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
167 		return;
168 
169 	new_val = dev_priv->display.irq.de_irq_mask[pipe];
170 	new_val &= ~interrupt_mask;
171 	new_val |= (~enabled_irq_mask & interrupt_mask);
172 
173 	if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
174 		dev_priv->display.irq.de_irq_mask[pipe] = new_val;
175 		intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
176 		intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
177 	}
178 }
179 
180 void bdw_enable_pipe_irq(struct drm_i915_private *i915,
181 			 enum pipe pipe, u32 bits)
182 {
183 	bdw_update_pipe_irq(i915, pipe, bits, bits);
184 }
185 
186 void bdw_disable_pipe_irq(struct drm_i915_private *i915,
187 			  enum pipe pipe, u32 bits)
188 {
189 	bdw_update_pipe_irq(i915, pipe, bits, 0);
190 }
191 
192 /**
193  * ibx_display_interrupt_update - update SDEIMR
194  * @dev_priv: driver private
195  * @interrupt_mask: mask of interrupt bits to update
196  * @enabled_irq_mask: mask of interrupt bits to enable
197  */
198 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
199 				  u32 interrupt_mask,
200 				  u32 enabled_irq_mask)
201 {
202 	struct intel_display *display = &dev_priv->display;
203 	u32 sdeimr = intel_de_read(display, SDEIMR);
204 
205 	sdeimr &= ~interrupt_mask;
206 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
207 
208 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
209 
210 	lockdep_assert_held(&dev_priv->irq_lock);
211 
212 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
213 		return;
214 
215 	intel_de_write(display, SDEIMR, sdeimr);
216 	intel_de_posting_read(display, SDEIMR);
217 }
218 
219 void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
220 {
221 	ibx_display_interrupt_update(i915, bits, bits);
222 }
223 
224 void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
225 {
226 	ibx_display_interrupt_update(i915, bits, 0);
227 }
228 
229 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
230 			      enum pipe pipe)
231 {
232 	u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe];
233 	u32 enable_mask = status_mask << 16;
234 
235 	lockdep_assert_held(&dev_priv->irq_lock);
236 
237 	if (DISPLAY_VER(dev_priv) < 5)
238 		goto out;
239 
240 	/*
241 	 * On pipe A we don't support the PSR interrupt yet,
242 	 * on pipe B and C the same bit MBZ.
243 	 */
244 	if (drm_WARN_ON_ONCE(&dev_priv->drm,
245 			     status_mask & PIPE_A_PSR_STATUS_VLV))
246 		return 0;
247 	/*
248 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
249 	 * A the same bit is for perf counters which we don't use either.
250 	 */
251 	if (drm_WARN_ON_ONCE(&dev_priv->drm,
252 			     status_mask & PIPE_B_PSR_STATUS_VLV))
253 		return 0;
254 
255 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
256 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
257 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
258 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
259 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
260 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
261 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
262 
263 out:
264 	drm_WARN_ONCE(&dev_priv->drm,
265 		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
266 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
267 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
268 		      pipe_name(pipe), enable_mask, status_mask);
269 
270 	return enable_mask;
271 }
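
/*
 * Background for the helper above (hardware layout summary, not new
 * behaviour): in the PIPESTAT registers the low 16 bits are status bits and
 * the high 16 bits are the corresponding enable bits, hence the base
 * enable_mask of status_mask << 16. The remaining fixups handle VLV/CHV bits
 * that don't follow that simple pattern.
 */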
272 
273 void i915_enable_pipestat(struct drm_i915_private *dev_priv,
274 			  enum pipe pipe, u32 status_mask)
275 {
276 	struct intel_display *display = &dev_priv->display;
277 	i915_reg_t reg = PIPESTAT(dev_priv, pipe);
278 	u32 enable_mask;
279 
280 	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
281 		      "pipe %c: status_mask=0x%x\n",
282 		      pipe_name(pipe), status_mask);
283 
284 	lockdep_assert_held(&dev_priv->irq_lock);
285 	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
286 
287 	if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
288 		return;
289 
290 	dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
291 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
292 
293 	intel_de_write(display, reg, enable_mask | status_mask);
294 	intel_de_posting_read(display, reg);
295 }
296 
297 void i915_disable_pipestat(struct drm_i915_private *dev_priv,
298 			   enum pipe pipe, u32 status_mask)
299 {
300 	struct intel_display *display = &dev_priv->display;
301 	i915_reg_t reg = PIPESTAT(dev_priv, pipe);
302 	u32 enable_mask;
303 
304 	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
305 		      "pipe %c: status_mask=0x%x\n",
306 		      pipe_name(pipe), status_mask);
307 
308 	lockdep_assert_held(&dev_priv->irq_lock);
309 	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
310 
311 	if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
312 		return;
313 
314 	dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
315 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
316 
317 	intel_de_write(display, reg, enable_mask | status_mask);
318 	intel_de_posting_read(display, reg);
319 }
320 
321 static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
322 {
323 	struct drm_i915_private *i915 = to_i915(display->drm);
324 
325 	if (IS_I85X(i915))
326 		return true;
327 
328 	if (IS_PINEVIEW(i915))
329 		return true;
330 
331 	return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
332 }
333 
334 /**
335  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
336  * @dev_priv: i915 device private
337  */
338 void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
339 {
340 	struct intel_display *display = &dev_priv->display;
341 
342 	if (!intel_opregion_asle_present(display))
343 		return;
344 
345 	if (!i915_has_legacy_blc_interrupt(display))
346 		return;
347 
348 	spin_lock_irq(&dev_priv->irq_lock);
349 
350 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
351 	if (DISPLAY_VER(dev_priv) >= 4)
352 		i915_enable_pipestat(dev_priv, PIPE_A,
353 				     PIPE_LEGACY_BLC_EVENT_STATUS);
354 
355 	spin_unlock_irq(&dev_priv->irq_lock);
356 }
357 
358 #if IS_ENABLED(CONFIG_DEBUG_FS)
359 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
360 					 enum pipe pipe,
361 					 u32 crc0, u32 crc1,
362 					 u32 crc2, u32 crc3,
363 					 u32 crc4)
364 {
365 	struct intel_display *display = &dev_priv->display;
366 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
367 	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
368 	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
369 
370 	trace_intel_pipe_crc(crtc, crcs);
371 
372 	spin_lock(&pipe_crc->lock);
373 	/*
374 	 * For some not yet identified reason, the first CRC is
375 	 * bonkers. So let's just wait for the next vblank and read
376 	 * out the buggy result.
377 	 *
378 	 * On GEN8+ sometimes the second CRC is bonkers as well, so
379 	 * don't trust that one either.
380 	 */
381 	if (pipe_crc->skipped <= 0 ||
382 	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
383 		pipe_crc->skipped++;
384 		spin_unlock(&pipe_crc->lock);
385 		return;
386 	}
387 	spin_unlock(&pipe_crc->lock);
388 
389 	drm_crtc_add_crc_entry(&crtc->base, true,
390 			       drm_crtc_accurate_vblank_count(&crtc->base),
391 			       crcs);
392 }
393 #else
394 static inline void
395 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
396 			     enum pipe pipe,
397 			     u32 crc0, u32 crc1,
398 			     u32 crc2, u32 crc3,
399 			     u32 crc4) {}
400 #endif
401 
402 static void flip_done_handler(struct drm_i915_private *i915,
403 			      enum pipe pipe)
404 {
405 	struct intel_display *display = &i915->display;
406 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
407 
408 	spin_lock(&i915->drm.event_lock);
409 
410 	if (crtc->flip_done_event) {
411 		trace_intel_crtc_flip_done(crtc);
412 		drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
413 		crtc->flip_done_event = NULL;
414 	}
415 
416 	spin_unlock(&i915->drm.event_lock);
417 }
418 
419 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
420 				     enum pipe pipe)
421 {
422 	struct intel_display *display = &dev_priv->display;
423 
424 	display_pipe_crc_irq_handler(dev_priv, pipe,
425 				     intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
426 				     0, 0, 0, 0);
427 }
428 
429 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
430 				     enum pipe pipe)
431 {
432 	struct intel_display *display = &dev_priv->display;
433 
434 	display_pipe_crc_irq_handler(dev_priv, pipe,
435 				     intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
436 				     intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
437 				     intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
438 				     intel_de_read(display, PIPE_CRC_RES_4_IVB(pipe)),
439 				     intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
440 }
441 
442 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
443 				      enum pipe pipe)
444 {
445 	struct intel_display *display = &dev_priv->display;
446 	u32 res1, res2;
447 
448 	if (DISPLAY_VER(dev_priv) >= 3)
449 		res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
450 	else
451 		res1 = 0;
452 
453 	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
454 		res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
455 	else
456 		res2 = 0;
457 
458 	display_pipe_crc_irq_handler(dev_priv, pipe,
459 				     intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)),
460 				     intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
461 				     intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
462 				     res1, res2);
463 }
464 
465 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
466 {
467 	struct intel_display *display = &dev_priv->display;
468 	enum pipe pipe;
469 
470 	for_each_pipe(dev_priv, pipe) {
471 		intel_de_write(display,
472 			       PIPESTAT(dev_priv, pipe),
473 			       PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);
474 
475 		dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
476 	}
477 }
478 
479 void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
480 			   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
481 {
482 	struct intel_display *display = &dev_priv->display;
483 	enum pipe pipe;
484 
485 	spin_lock(&dev_priv->irq_lock);
486 
487 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
488 	    !dev_priv->display.irq.vlv_display_irqs_enabled) {
489 		spin_unlock(&dev_priv->irq_lock);
490 		return;
491 	}
492 
493 	for_each_pipe(dev_priv, pipe) {
494 		i915_reg_t reg;
495 		u32 status_mask, enable_mask, iir_bit = 0;
496 
497 		/*
498 		 * PIPESTAT bits get signalled even when the interrupt is
499 		 * disabled with the mask bits, and some of the status bits do
500 		 * not generate interrupts at all (like the underrun bit). Hence
501 		 * we need to be careful that we only handle what we want to
502 		 * handle.
503 		 */
504 
505 		/* fifo underruns are filtered in the underrun handler. */
506 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
507 
508 		switch (pipe) {
509 		default:
510 		case PIPE_A:
511 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
512 			break;
513 		case PIPE_B:
514 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
515 			break;
516 		case PIPE_C:
517 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
518 			break;
519 		}
520 		if (iir & iir_bit)
521 			status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
522 
523 		if (!status_mask)
524 			continue;
525 
526 		reg = PIPESTAT(dev_priv, pipe);
527 		pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
528 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
529 
530 		/*
531 		 * Clear the PIPE*STAT regs before the IIR
532 		 *
533 		 * Toggle the enable bits to make sure we get an
534 		 * edge in the ISR pipe event bit if we don't clear
535 		 * all the enabled status bits. Otherwise the edge
536 		 * triggered IIR on i965/g4x wouldn't notice that
537 		 * an interrupt is still pending.
538 		 */
539 		if (pipe_stats[pipe]) {
540 			intel_de_write(display, reg, pipe_stats[pipe]);
541 			intel_de_write(display, reg, enable_mask);
542 		}
543 	}
544 	spin_unlock(&dev_priv->irq_lock);
545 }
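
/*
 * Rough usage sketch for the ack/handler split (simplified; the real callers
 * are the platform interrupt handlers in i915_irq.c): the top level handler
 * first latches and clears the per-pipe status with i9xx_pipestat_irq_ack()
 * and only afterwards acts on it, e.g.:
 *
 *	u32 pipe_stats[I915_MAX_PIPES] = {};
 *
 *	i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 *	... ack IIR ...
 *	i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
 */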
546 
547 void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
548 			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
549 {
550 	struct intel_display *display = &dev_priv->display;
551 	bool blc_event = false;
552 	enum pipe pipe;
553 
554 	for_each_pipe(dev_priv, pipe) {
555 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
556 			intel_handle_vblank(dev_priv, pipe);
557 
558 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
559 			blc_event = true;
560 
561 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
562 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
563 
564 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
565 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
566 	}
567 
568 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
569 		intel_opregion_asle_intr(display);
570 }
571 
572 void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
573 			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
574 {
575 	struct intel_display *display = &dev_priv->display;
576 	bool blc_event = false;
577 	enum pipe pipe;
578 
579 	for_each_pipe(dev_priv, pipe) {
580 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
581 			intel_handle_vblank(dev_priv, pipe);
582 
583 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
584 			blc_event = true;
585 
586 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
587 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
588 
589 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
590 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
591 	}
592 
593 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
594 		intel_opregion_asle_intr(display);
595 
596 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
597 		intel_gmbus_irq_handler(display);
598 }
599 
600 void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
601 				     u32 pipe_stats[I915_MAX_PIPES])
602 {
603 	struct intel_display *display = &dev_priv->display;
604 	enum pipe pipe;
605 
606 	for_each_pipe(dev_priv, pipe) {
607 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
608 			intel_handle_vblank(dev_priv, pipe);
609 
610 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
611 			flip_done_handler(dev_priv, pipe);
612 
613 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
614 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
615 
616 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
617 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
618 	}
619 
620 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
621 		intel_gmbus_irq_handler(display);
622 }
623 
624 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
625 {
626 	struct intel_display *display = &dev_priv->display;
627 	enum pipe pipe;
628 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
629 
630 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
631 
632 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
633 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
634 			       SDE_AUDIO_POWER_SHIFT);
635 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
636 			port_name(port));
637 	}
638 
639 	if (pch_iir & SDE_AUX_MASK)
640 		intel_dp_aux_irq_handler(display);
641 
642 	if (pch_iir & SDE_GMBUS)
643 		intel_gmbus_irq_handler(display);
644 
645 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
646 		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
647 
648 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
649 		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
650 
651 	if (pch_iir & SDE_POISON)
652 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
653 
654 	if (pch_iir & SDE_FDI_MASK) {
655 		for_each_pipe(dev_priv, pipe)
656 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
657 				pipe_name(pipe),
658 				intel_de_read(display, FDI_RX_IIR(pipe)));
659 	}
660 
661 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
662 		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
663 
664 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
665 		drm_dbg(&dev_priv->drm,
666 			"PCH transcoder CRC error interrupt\n");
667 
668 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
669 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
670 
671 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
672 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
673 }
674 
675 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
676 {
677 	struct intel_display *display = &dev_priv->display;
678 	u32 err_int = intel_de_read(display, GEN7_ERR_INT);
679 	enum pipe pipe;
680 
681 	if (err_int & ERR_INT_POISON)
682 		drm_err(&dev_priv->drm, "Poison interrupt\n");
683 
684 	for_each_pipe(dev_priv, pipe) {
685 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
686 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
687 
688 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
689 			if (IS_IVYBRIDGE(dev_priv))
690 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
691 			else
692 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
693 		}
694 	}
695 
696 	intel_de_write(display, GEN7_ERR_INT, err_int);
697 }
698 
699 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
700 {
701 	struct intel_display *display = &dev_priv->display;
702 	u32 serr_int = intel_de_read(display, SERR_INT);
703 	enum pipe pipe;
704 
705 	if (serr_int & SERR_INT_POISON)
706 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
707 
708 	for_each_pipe(dev_priv, pipe)
709 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
710 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
711 
712 	intel_de_write(display, SERR_INT, serr_int);
713 }
714 
715 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
716 {
717 	struct intel_display *display = &dev_priv->display;
718 	enum pipe pipe;
719 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
720 
721 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
722 
723 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
724 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
725 			       SDE_AUDIO_POWER_SHIFT_CPT);
726 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
727 			port_name(port));
728 	}
729 
730 	if (pch_iir & SDE_AUX_MASK_CPT)
731 		intel_dp_aux_irq_handler(display);
732 
733 	if (pch_iir & SDE_GMBUS_CPT)
734 		intel_gmbus_irq_handler(display);
735 
736 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
737 		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
738 
739 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
740 		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
741 
742 	if (pch_iir & SDE_FDI_MASK_CPT) {
743 		for_each_pipe(dev_priv, pipe)
744 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
745 				pipe_name(pipe),
746 				intel_de_read(display, FDI_RX_IIR(pipe)));
747 	}
748 
749 	if (pch_iir & SDE_ERROR_CPT)
750 		cpt_serr_int_handler(dev_priv);
751 }
752 
753 void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
754 {
755 	struct intel_display *display = &dev_priv->display;
756 	enum pipe pipe;
757 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
758 
759 	if (hotplug_trigger)
760 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
761 
762 	if (de_iir & DE_AUX_CHANNEL_A)
763 		intel_dp_aux_irq_handler(display);
764 
765 	if (de_iir & DE_GSE)
766 		intel_opregion_asle_intr(display);
767 
768 	if (de_iir & DE_POISON)
769 		drm_err(&dev_priv->drm, "Poison interrupt\n");
770 
771 	for_each_pipe(dev_priv, pipe) {
772 		if (de_iir & DE_PIPE_VBLANK(pipe))
773 			intel_handle_vblank(dev_priv, pipe);
774 
775 		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
776 			flip_done_handler(dev_priv, pipe);
777 
778 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
779 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
780 
781 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
782 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
783 	}
784 
785 	/* check event from PCH */
786 	if (de_iir & DE_PCH_EVENT) {
787 		u32 pch_iir = intel_de_read(display, SDEIIR);
788 
789 		if (HAS_PCH_CPT(dev_priv))
790 			cpt_irq_handler(dev_priv, pch_iir);
791 		else
792 			ibx_irq_handler(dev_priv, pch_iir);
793 
794 		/* should clear PCH hotplug event before clearing CPU irq */
795 		intel_de_write(display, SDEIIR, pch_iir);
796 	}
797 
798 	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
799 		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
800 }
801 
802 void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
803 {
804 	struct intel_display *display = &dev_priv->display;
805 	enum pipe pipe;
806 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
807 
808 	if (hotplug_trigger)
809 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
810 
811 	if (de_iir & DE_ERR_INT_IVB)
812 		ivb_err_int_handler(dev_priv);
813 
814 	if (de_iir & DE_EDP_PSR_INT_HSW) {
815 		struct intel_encoder *encoder;
816 
817 		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
818 			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
819 			u32 psr_iir;
820 
821 			psr_iir = intel_de_rmw(display, EDP_PSR_IIR, 0, 0);
822 			intel_psr_irq_handler(intel_dp, psr_iir);
823 			break;
824 		}
825 	}
826 
827 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
828 		intel_dp_aux_irq_handler(display);
829 
830 	if (de_iir & DE_GSE_IVB)
831 		intel_opregion_asle_intr(display);
832 
833 	for_each_pipe(dev_priv, pipe) {
834 		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
835 			intel_handle_vblank(dev_priv, pipe);
836 
837 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
838 			flip_done_handler(dev_priv, pipe);
839 	}
840 
841 	/* check event from PCH */
842 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
843 		u32 pch_iir = intel_de_read(display, SDEIIR);
844 
845 		cpt_irq_handler(dev_priv, pch_iir);
846 
847 		/* clear PCH hotplug event before clearing CPU irq */
848 		intel_de_write(display, SDEIIR, pch_iir);
849 	}
850 }
851 
852 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
853 {
854 	u32 mask;
855 
856 	if (DISPLAY_VER(dev_priv) >= 20)
857 		return 0;
858 	else if (DISPLAY_VER(dev_priv) >= 14)
859 		return TGL_DE_PORT_AUX_DDIA |
860 			TGL_DE_PORT_AUX_DDIB;
861 	else if (DISPLAY_VER(dev_priv) >= 13)
862 		return TGL_DE_PORT_AUX_DDIA |
863 			TGL_DE_PORT_AUX_DDIB |
864 			TGL_DE_PORT_AUX_DDIC |
865 			XELPD_DE_PORT_AUX_DDID |
866 			XELPD_DE_PORT_AUX_DDIE |
867 			TGL_DE_PORT_AUX_USBC1 |
868 			TGL_DE_PORT_AUX_USBC2 |
869 			TGL_DE_PORT_AUX_USBC3 |
870 			TGL_DE_PORT_AUX_USBC4;
871 	else if (DISPLAY_VER(dev_priv) >= 12)
872 		return TGL_DE_PORT_AUX_DDIA |
873 			TGL_DE_PORT_AUX_DDIB |
874 			TGL_DE_PORT_AUX_DDIC |
875 			TGL_DE_PORT_AUX_USBC1 |
876 			TGL_DE_PORT_AUX_USBC2 |
877 			TGL_DE_PORT_AUX_USBC3 |
878 			TGL_DE_PORT_AUX_USBC4 |
879 			TGL_DE_PORT_AUX_USBC5 |
880 			TGL_DE_PORT_AUX_USBC6;
881 
882 	mask = GEN8_AUX_CHANNEL_A;
883 	if (DISPLAY_VER(dev_priv) >= 9)
884 		mask |= GEN9_AUX_CHANNEL_B |
885 			GEN9_AUX_CHANNEL_C |
886 			GEN9_AUX_CHANNEL_D;
887 
888 	if (DISPLAY_VER(dev_priv) == 11) {
889 		mask |= ICL_AUX_CHANNEL_F;
890 		mask |= ICL_AUX_CHANNEL_E;
891 	}
892 
893 	return mask;
894 }
895 
896 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
897 {
898 	struct intel_display *display = &dev_priv->display;
899 
900 	if (DISPLAY_VER(display) >= 14)
901 		return MTL_PIPEDMC_ATS_FAULT |
902 			MTL_PLANE_ATS_FAULT |
903 			GEN12_PIPEDMC_FAULT |
904 			GEN9_PIPE_CURSOR_FAULT |
905 			GEN11_PIPE_PLANE5_FAULT |
906 			GEN9_PIPE_PLANE4_FAULT |
907 			GEN9_PIPE_PLANE3_FAULT |
908 			GEN9_PIPE_PLANE2_FAULT |
909 			GEN9_PIPE_PLANE1_FAULT;
910 	if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
911 		return GEN12_PIPEDMC_FAULT |
912 			GEN9_PIPE_CURSOR_FAULT |
913 			GEN11_PIPE_PLANE5_FAULT |
914 			GEN9_PIPE_PLANE4_FAULT |
915 			GEN9_PIPE_PLANE3_FAULT |
916 			GEN9_PIPE_PLANE2_FAULT |
917 			GEN9_PIPE_PLANE1_FAULT;
918 	else if (DISPLAY_VER(display) == 12)
919 		return GEN12_PIPEDMC_FAULT |
920 			GEN9_PIPE_CURSOR_FAULT |
921 			GEN11_PIPE_PLANE7_FAULT |
922 			GEN11_PIPE_PLANE6_FAULT |
923 			GEN11_PIPE_PLANE5_FAULT |
924 			GEN9_PIPE_PLANE4_FAULT |
925 			GEN9_PIPE_PLANE3_FAULT |
926 			GEN9_PIPE_PLANE2_FAULT |
927 			GEN9_PIPE_PLANE1_FAULT;
928 	else if (DISPLAY_VER(display) == 11)
929 		return GEN9_PIPE_CURSOR_FAULT |
930 			GEN11_PIPE_PLANE7_FAULT |
931 			GEN11_PIPE_PLANE6_FAULT |
932 			GEN11_PIPE_PLANE5_FAULT |
933 			GEN9_PIPE_PLANE4_FAULT |
934 			GEN9_PIPE_PLANE3_FAULT |
935 			GEN9_PIPE_PLANE2_FAULT |
936 			GEN9_PIPE_PLANE1_FAULT;
937 	else if (DISPLAY_VER(display) >= 9)
938 		return GEN9_PIPE_CURSOR_FAULT |
939 			GEN9_PIPE_PLANE4_FAULT |
940 			GEN9_PIPE_PLANE3_FAULT |
941 			GEN9_PIPE_PLANE2_FAULT |
942 			GEN9_PIPE_PLANE1_FAULT;
943 	else
944 		return GEN8_PIPE_CURSOR_FAULT |
945 			GEN8_PIPE_SPRITE_FAULT |
946 			GEN8_PIPE_PRIMARY_FAULT;
947 }
948 
949 static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
950 {
951 	wake_up_all(&dev_priv->display.pmdemand.waitqueue);
952 }
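
/*
 * The wake_up_all() above is assumed to pair with the wait for a pmdemand
 * response in intel_pmdemand.c; this handler only wakes the waiters, the
 * actual response handling happens there.
 */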
953 
954 static void
955 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
956 {
957 	struct intel_display *display = &dev_priv->display;
958 	bool found = false;
959 
960 	if (HAS_DBUF_OVERLAP_DETECTION(display)) {
961 		if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) {
962 			drm_warn(display->drm, "DBuf overlap detected\n");
963 			found = true;
964 		}
965 	}
966 
967 	if (DISPLAY_VER(dev_priv) >= 14) {
968 		if (iir & (XELPDP_PMDEMAND_RSP |
969 			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
970 			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
971 				drm_dbg(&dev_priv->drm,
972 					"Error waiting for Punit PM Demand Response\n");
973 
974 			intel_pmdemand_irq_handler(dev_priv);
975 			found = true;
976 		}
977 
978 		if (iir & XELPDP_RM_TIMEOUT) {
979 			u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
980 			drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
981 			found = true;
982 		}
983 	} else if (iir & GEN8_DE_MISC_GSE) {
984 		intel_opregion_asle_intr(display);
985 		found = true;
986 	}
987 
988 	if (iir & GEN8_DE_EDP_PSR) {
989 		struct intel_encoder *encoder;
990 		u32 psr_iir;
991 		i915_reg_t iir_reg;
992 
993 		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
994 			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
995 
996 			if (DISPLAY_VER(dev_priv) >= 12)
997 				iir_reg = TRANS_PSR_IIR(dev_priv,
998 						        intel_dp->psr.transcoder);
999 			else
1000 				iir_reg = EDP_PSR_IIR;
1001 
1002 			psr_iir = intel_de_rmw(display, iir_reg, 0, 0);
1003 
1004 			if (psr_iir)
1005 				found = true;
1006 
1007 			intel_psr_irq_handler(intel_dp, psr_iir);
1008 
1009 			/* prior to GEN12 there is only one EDP PSR */
1010 			if (DISPLAY_VER(dev_priv) < 12)
1011 				break;
1012 		}
1013 	}
1014 
1015 	if (!found)
1016 		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
1017 }
1018 
1019 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
1020 					   u32 te_trigger)
1021 {
1022 	struct intel_display *display = &dev_priv->display;
1023 	enum pipe pipe = INVALID_PIPE;
1024 	enum transcoder dsi_trans;
1025 	enum port port;
1026 	u32 val;
1027 
1028 	/*
1029 	 * In case of dual link, TE comes from DSI_1;
1030 	 * this is to check if dual link is enabled.
1031 	 */
1032 	val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
1033 	val &= PORT_SYNC_MODE_ENABLE;
1034 
1035 	/*
1036 	 * if dual link is enabled, then read DSI_0
1037 	 * transcoder registers
1038 	 */
1039 	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
1040 						  PORT_A : PORT_B;
1041 	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
1042 
1043 	/* Check if DSI configured in command mode */
1044 	val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
1045 	val = val & OP_MODE_MASK;
1046 
1047 	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
1048 		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
1049 		return;
1050 	}
1051 
1052 	/* Get PIPE for handling VBLANK event */
1053 	val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
1054 	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
1055 	case TRANS_DDI_EDP_INPUT_A_ON:
1056 		pipe = PIPE_A;
1057 		break;
1058 	case TRANS_DDI_EDP_INPUT_B_ONOFF:
1059 		pipe = PIPE_B;
1060 		break;
1061 	case TRANS_DDI_EDP_INPUT_C_ONOFF:
1062 		pipe = PIPE_C;
1063 		break;
1064 	default:
1065 		drm_err(&dev_priv->drm, "Invalid PIPE\n");
1066 		return;
1067 	}
1068 
1069 	intel_handle_vblank(dev_priv, pipe);
1070 
1071 	/* clear TE in dsi IIR */
1072 	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
1073 	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
1074 }
1075 
1076 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
1077 {
1078 	if (DISPLAY_VER(i915) >= 9)
1079 		return GEN9_PIPE_PLANE1_FLIP_DONE;
1080 	else
1081 		return GEN8_PIPE_PRIMARY_FLIP_DONE;
1082 }
1083 
1084 static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
1085 {
1086 	struct intel_display *display = &i915->display;
1087 	u32 pica_ier = 0;
1088 
1089 	*pica_iir = 0;
1090 	*pch_iir = intel_de_read(display, SDEIIR);
1091 	if (!*pch_iir)
1092 		return;
1093 
1094 	/*
1095 	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
1096 	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
1097 	 * their flags both in the PICA and SDE IIR.
1098 	 */
1099 	if (*pch_iir & SDE_PICAINTERRUPT) {
1100 		drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
1101 
1102 		pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
1103 		*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
1104 		intel_de_write(display, PICAINTERRUPT_IIR, *pica_iir);
1105 	}
1106 
1107 	intel_de_write(display, SDEIIR, *pch_iir);
1108 
1109 	if (pica_ier)
1110 		intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
1111 }
1112 
1113 void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
1114 {
1115 	struct intel_display *display = &dev_priv->display;
1116 	u32 iir;
1117 	enum pipe pipe;
1118 
1119 	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
1120 
1121 	if (master_ctl & GEN8_DE_MISC_IRQ) {
1122 		iir = intel_de_read(display, GEN8_DE_MISC_IIR);
1123 		if (iir) {
1124 			intel_de_write(display, GEN8_DE_MISC_IIR, iir);
1125 			gen8_de_misc_irq_handler(dev_priv, iir);
1126 		} else {
1127 			drm_err_ratelimited(&dev_priv->drm,
1128 					    "The master control interrupt lied (DE MISC)!\n");
1129 		}
1130 	}
1131 
1132 	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
1133 		iir = intel_de_read(display, GEN11_DE_HPD_IIR);
1134 		if (iir) {
1135 			intel_de_write(display, GEN11_DE_HPD_IIR, iir);
1136 			gen11_hpd_irq_handler(dev_priv, iir);
1137 		} else {
1138 			drm_err_ratelimited(&dev_priv->drm,
1139 					    "The master control interrupt lied (DE HPD)!\n");
1140 		}
1141 	}
1142 
1143 	if (master_ctl & GEN8_DE_PORT_IRQ) {
1144 		iir = intel_de_read(display, GEN8_DE_PORT_IIR);
1145 		if (iir) {
1146 			bool found = false;
1147 
1148 			intel_de_write(display, GEN8_DE_PORT_IIR, iir);
1149 
1150 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
1151 				intel_dp_aux_irq_handler(display);
1152 				found = true;
1153 			}
1154 
1155 			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
1156 				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
1157 
1158 				if (hotplug_trigger) {
1159 					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
1160 					found = true;
1161 				}
1162 			} else if (IS_BROADWELL(dev_priv)) {
1163 				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
1164 
1165 				if (hotplug_trigger) {
1166 					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1167 					found = true;
1168 				}
1169 			}
1170 
1171 			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
1172 			    (iir & BXT_DE_PORT_GMBUS)) {
1173 				intel_gmbus_irq_handler(display);
1174 				found = true;
1175 			}
1176 
1177 			if (DISPLAY_VER(dev_priv) >= 11) {
1178 				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
1179 
1180 				if (te_trigger) {
1181 					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
1182 					found = true;
1183 				}
1184 			}
1185 
1186 			if (!found)
1187 				drm_err_ratelimited(&dev_priv->drm,
1188 						    "Unexpected DE Port interrupt\n");
1189 		} else {
1190 			drm_err_ratelimited(&dev_priv->drm,
1191 					    "The master control interrupt lied (DE PORT)!\n");
1192 		}
1193 	}
1194 
1195 	for_each_pipe(dev_priv, pipe) {
1196 		u32 fault_errors;
1197 
1198 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
1199 			continue;
1200 
1201 		iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
1202 		if (!iir) {
1203 			drm_err_ratelimited(&dev_priv->drm,
1204 					    "The master control interrupt lied (DE PIPE)!\n");
1205 			continue;
1206 		}
1207 
1208 		intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);
1209 
1210 		if (iir & GEN8_PIPE_VBLANK)
1211 			intel_handle_vblank(dev_priv, pipe);
1212 
1213 		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
1214 			flip_done_handler(dev_priv, pipe);
1215 
1216 		if (HAS_DSB(dev_priv)) {
1217 			if (iir & GEN12_DSB_INT(INTEL_DSB_0))
1218 				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);
1219 
1220 			if (iir & GEN12_DSB_INT(INTEL_DSB_1))
1221 				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);
1222 
1223 			if (iir & GEN12_DSB_INT(INTEL_DSB_2))
1224 				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
1225 		}
1226 
1227 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
1228 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
1229 
1230 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
1231 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1232 
1233 		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
1234 		if (fault_errors)
1235 			drm_err_ratelimited(&dev_priv->drm,
1236 					    "Fault errors on pipe %c: 0x%08x\n",
1237 					    pipe_name(pipe),
1238 					    fault_errors);
1239 	}
1240 
1241 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
1242 	    master_ctl & GEN8_DE_PCH_IRQ) {
1243 		u32 pica_iir;
1244 
1245 		/*
1246 		 * FIXME(BDW): Assume for now that the new interrupt handling
1247 		 * scheme also closed the SDE interrupt handling race we've seen
1248 		 * on older pch-split platforms. But this needs testing.
1249 		 */
1250 		gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
1251 		if (iir) {
1252 			if (pica_iir)
1253 				xelpdp_pica_irq_handler(dev_priv, pica_iir);
1254 
1255 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
1256 				icp_irq_handler(dev_priv, iir);
1257 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
1258 				spt_irq_handler(dev_priv, iir);
1259 			else
1260 				cpt_irq_handler(dev_priv, iir);
1261 		} else {
1262 			/*
1263 			 * Like on previous PCH there seems to be something
1264 			 * fishy going on with forwarding PCH interrupts.
1265 			 */
1266 			drm_dbg(&dev_priv->drm,
1267 				"The master control interrupt lied (SDE)!\n");
1268 		}
1269 	}
1270 }
1271 
1272 u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
1273 {
1274 	struct intel_display *display = &i915->display;
1275 	u32 iir;
1276 
1277 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
1278 		return 0;
1279 
1280 	iir = intel_de_read(display, GEN11_GU_MISC_IIR);
1281 	if (likely(iir))
1282 		intel_de_write(display, GEN11_GU_MISC_IIR, iir);
1283 
1284 	return iir;
1285 }
1286 
1287 void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
1288 {
1289 	struct intel_display *display = &i915->display;
1290 
1291 	if (iir & GEN11_GU_MISC_GSE)
1292 		intel_opregion_asle_intr(display);
1293 }
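
/*
 * Usage sketch for the two helpers above (simplified; the real caller is the
 * top level gen11+ interrupt handler): the GU misc IIR is acked while the
 * master interrupt is still disabled and handled only afterwards, roughly:
 *
 *	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
 *	...
 *	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
 */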
1294 
1295 void gen11_display_irq_handler(struct drm_i915_private *i915)
1296 {
1297 	struct intel_display *display = &i915->display;
1298 	u32 disp_ctl;
1299 
1300 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
1301 	/*
1302 	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
1303 	 * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
1304 	 */
1305 	disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);
1306 
1307 	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
1308 	gen8_de_irq_handler(i915, disp_ctl);
1309 	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
1310 
1311 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1312 }
1313 
1314 static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
1315 {
1316 	struct intel_display *display = &i915->display;
1317 	lockdep_assert_held(&i915->drm.vblank_time_lock);
1318 
1319 	/*
1320 	 * Vblank/CRC interrupts fail to wake the device up from C2+.
1321 	 * Disabling render clock gating during C-states avoids
1322 	 * the problem. There is a small power cost so we do this
1323 	 * only when vblank/CRC interrupts are actually enabled.
1324 	 */
1325 	if (i915->display.irq.vblank_enabled++ == 0)
1326 		intel_de_write(display, SCPD0,
1327 			       _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
1328 }
1329 
1330 static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
1331 {
1332 	struct intel_display *display = &i915->display;
1333 	lockdep_assert_held(&i915->drm.vblank_time_lock);
1334 
1335 	if (--i915->display.irq.vblank_enabled == 0)
1336 		intel_de_write(display, SCPD0,
1337 			       _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
1338 }
1339 
1340 void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
1341 {
1342 	spin_lock_irq(&i915->drm.vblank_time_lock);
1343 
1344 	if (enable)
1345 		i915gm_irq_cstate_wa_enable(i915);
1346 	else
1347 		i915gm_irq_cstate_wa_disable(i915);
1348 
1349 	spin_unlock_irq(&i915->drm.vblank_time_lock);
1350 }
1351 
1352 int i8xx_enable_vblank(struct drm_crtc *crtc)
1353 {
1354 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1355 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1356 	unsigned long irqflags;
1357 
1358 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1359 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1360 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1361 
1362 	return 0;
1363 }
1364 
1365 void i8xx_disable_vblank(struct drm_crtc *crtc)
1366 {
1367 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1368 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1369 	unsigned long irqflags;
1370 
1371 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1372 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1373 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1374 }
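
/*
 * i8xx_enable_vblank()/i8xx_disable_vblank() and the other *_vblank() helpers
 * below are meant to be used as the drm_crtc_funcs enable_vblank/disable_vblank
 * hooks (wired up in intel_crtc.c), hence the irqsave locking: the drm vblank
 * core may call them with interrupts already disabled.
 */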
1375 
1376 int i915gm_enable_vblank(struct drm_crtc *crtc)
1377 {
1378 	struct drm_i915_private *i915 = to_i915(crtc->dev);
1379 
1380 	i915gm_irq_cstate_wa_enable(i915);
1381 
1382 	return i8xx_enable_vblank(crtc);
1383 }
1384 
1385 void i915gm_disable_vblank(struct drm_crtc *crtc)
1386 {
1387 	struct drm_i915_private *i915 = to_i915(crtc->dev);
1388 
1389 	i8xx_disable_vblank(crtc);
1390 
1391 	i915gm_irq_cstate_wa_disable(i915);
1392 }
1393 
1394 int i965_enable_vblank(struct drm_crtc *crtc)
1395 {
1396 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1397 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1398 	unsigned long irqflags;
1399 
1400 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1401 	i915_enable_pipestat(dev_priv, pipe,
1402 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
1403 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1404 
1405 	return 0;
1406 }
1407 
1408 void i965_disable_vblank(struct drm_crtc *crtc)
1409 {
1410 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1411 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1412 	unsigned long irqflags;
1413 
1414 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1415 	i915_disable_pipestat(dev_priv, pipe,
1416 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
1417 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1418 }
1419 
1420 int ilk_enable_vblank(struct drm_crtc *crtc)
1421 {
1422 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1423 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1424 	unsigned long irqflags;
1425 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
1426 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
1427 
1428 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1429 	ilk_enable_display_irq(dev_priv, bit);
1430 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1431 
1432 	/* Even though there is no DMC, frame counter can get stuck when
1433 	 * PSR is active as no frames are generated.
1434 	 */
1435 	if (HAS_PSR(dev_priv))
1436 		drm_crtc_vblank_restore(crtc);
1437 
1438 	return 0;
1439 }
1440 
1441 void ilk_disable_vblank(struct drm_crtc *crtc)
1442 {
1443 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1444 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1445 	unsigned long irqflags;
1446 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
1447 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
1448 
1449 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1450 	ilk_disable_display_irq(dev_priv, bit);
1451 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1452 }
1453 
1454 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
1455 				   bool enable)
1456 {
1457 	struct intel_display *display = to_intel_display(intel_crtc);
1458 	enum port port;
1459 
1460 	if (!(intel_crtc->mode_flags &
1461 	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
1462 		return false;
1463 
1464 	/* for dual link cases we consider TE from slave */
1465 	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
1466 		port = PORT_B;
1467 	else
1468 		port = PORT_A;
1469 
1470 	intel_de_rmw(display, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, enable ? 0 : DSI_TE_EVENT);
1471 
1472 	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
1473 
1474 	return true;
1475 }
1476 
1477 static void intel_display_vblank_dc_work(struct work_struct *work)
1478 {
1479 	struct intel_display *display =
1480 		container_of(work, typeof(*display), irq.vblank_dc_work);
1481 	int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
1482 
1483 	/*
1484 	 * NOTE: intel_display_power_set_target_dc_state is used only by PSR
1485 	 * code for DC3CO handling. DC3CO target state is currently disabled in
1486 	 * PSR code. If DC3CO is taken into use we need take that into account
1487 	 * PSR code. If DC3CO is taken into use we need to take that into account
1488 	 */
1489 	intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
1490 						DC_STATE_EN_UPTO_DC6);
1491 }
1492 
1493 int bdw_enable_vblank(struct drm_crtc *_crtc)
1494 {
1495 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
1496 	struct intel_display *display = to_intel_display(crtc);
1497 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1498 	enum pipe pipe = crtc->pipe;
1499 	unsigned long irqflags;
1500 
1501 	if (gen11_dsi_configure_te(crtc, true))
1502 		return 0;
1503 
1504 	if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
1505 		schedule_work(&display->irq.vblank_dc_work);
1506 
1507 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1508 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
1509 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1510 
1511 	/* Even if there is no DMC, frame counter can get stuck when
1512 	 * PSR is active as no frames are generated, so check only for PSR.
1513 	 */
1514 	if (HAS_PSR(dev_priv))
1515 		drm_crtc_vblank_restore(&crtc->base);
1516 
1517 	return 0;
1518 }
1519 
1520 void bdw_disable_vblank(struct drm_crtc *_crtc)
1521 {
1522 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
1523 	struct intel_display *display = to_intel_display(crtc);
1524 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1525 	enum pipe pipe = crtc->pipe;
1526 	unsigned long irqflags;
1527 
1528 	if (gen11_dsi_configure_te(crtc, false))
1529 		return;
1530 
1531 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1532 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
1533 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1534 
1535 	if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
1536 		schedule_work(&display->irq.vblank_dc_work);
1537 }
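
/*
 * Summary of the block_dc_for_vblank handling above: the first pipe enabling
 * its vblank interrupt with block_dc_for_vblank set bumps vblank_wa_num_pipes
 * and schedules vblank_dc_work to disable DC states, and the last pipe
 * disabling its vblank interrupt schedules the work again to re-allow DC6
 * (see intel_display_vblank_dc_work() above).
 */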
1538 
1539 static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
1540 {
1541 	struct intel_display *display = &dev_priv->display;
1542 
1543 	if (IS_CHERRYVIEW(dev_priv))
1544 		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
1545 	else
1546 		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
1547 
1548 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
1549 	intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
1550 
1551 	i9xx_pipestat_irq_reset(dev_priv);
1552 
1553 	intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
1554 	dev_priv->irq_mask = ~0u;
1555 }
1556 
1557 void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
1558 {
1559 	if (dev_priv->display.irq.vlv_display_irqs_enabled)
1560 		_vlv_display_irq_reset(dev_priv);
1561 }
1562 
1563 void i9xx_display_irq_reset(struct drm_i915_private *i915)
1564 {
1565 	struct intel_display *display = &i915->display;
1566 
1567 	if (I915_HAS_HOTPLUG(i915)) {
1568 		i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
1569 		intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0);
1570 	}
1571 
1572 	i9xx_pipestat_irq_reset(i915);
1573 }
1574 
1575 void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
1576 {
1577 	struct intel_display *display = &dev_priv->display;
1578 	u32 pipestat_mask;
1579 	u32 enable_mask;
1580 	enum pipe pipe;
1581 
1582 	if (!dev_priv->display.irq.vlv_display_irqs_enabled)
1583 		return;
1584 
1585 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
1586 
1587 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
1588 	for_each_pipe(dev_priv, pipe)
1589 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
1590 
1591 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
1592 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1593 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1594 		I915_LPE_PIPE_A_INTERRUPT |
1595 		I915_LPE_PIPE_B_INTERRUPT;
1596 
1597 	if (IS_CHERRYVIEW(dev_priv))
1598 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
1599 			I915_LPE_PIPE_C_INTERRUPT;
1600 
1601 	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
1602 
1603 	dev_priv->irq_mask = ~enable_mask;
1604 
1605 	intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
1606 }
1607 
1608 void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
1609 {
1610 	struct intel_display *display = &dev_priv->display;
1611 	enum pipe pipe;
1612 
1613 	if (!HAS_DISPLAY(dev_priv))
1614 		return;
1615 
1616 	intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
1617 	intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
1618 
1619 	for_each_pipe(dev_priv, pipe)
1620 		if (intel_display_power_is_enabled(dev_priv,
1621 						   POWER_DOMAIN_PIPE(pipe)))
1622 			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
1623 
1624 	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
1625 	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
1626 }
1627 
1628 void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
1629 {
1630 	struct intel_display *display = &dev_priv->display;
1631 	enum pipe pipe;
1632 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
1633 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
1634 
1635 	if (!HAS_DISPLAY(dev_priv))
1636 		return;
1637 
1638 	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
1639 
1640 	if (DISPLAY_VER(dev_priv) >= 12) {
1641 		enum transcoder trans;
1642 
1643 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
1644 			enum intel_display_power_domain domain;
1645 
1646 			domain = POWER_DOMAIN_TRANSCODER(trans);
1647 			if (!intel_display_power_is_enabled(dev_priv, domain))
1648 				continue;
1649 
1650 			intel_de_write(display,
1651 				       TRANS_PSR_IMR(dev_priv, trans),
1652 				       0xffffffff);
1653 			intel_de_write(display,
1654 				       TRANS_PSR_IIR(dev_priv, trans),
1655 				       0xffffffff);
1656 		}
1657 	} else {
1658 		intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
1659 		intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
1660 	}
1661 
1662 	for_each_pipe(dev_priv, pipe)
1663 		if (intel_display_power_is_enabled(dev_priv,
1664 						   POWER_DOMAIN_PIPE(pipe)))
1665 			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
1666 
1667 	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
1668 	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
1669 
1670 	if (DISPLAY_VER(dev_priv) >= 14)
1671 		intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
1672 	else
1673 		intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
1674 
1675 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
1676 		intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
1677 }
1678 
1679 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
1680 				     u8 pipe_mask)
1681 {
1682 	struct intel_display *display = &dev_priv->display;
1683 	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
1684 		gen8_de_pipe_flip_done_mask(dev_priv);
1685 	enum pipe pipe;
1686 
1687 	spin_lock_irq(&dev_priv->irq_lock);
1688 
1689 	if (!intel_irqs_enabled(dev_priv)) {
1690 		spin_unlock_irq(&dev_priv->irq_lock);
1691 		return;
1692 	}
1693 
1694 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
1695 		intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
1696 					    dev_priv->display.irq.de_irq_mask[pipe],
1697 					    ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
1698 
1699 	spin_unlock_irq(&dev_priv->irq_lock);
1700 }
1701 
1702 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
1703 				     u8 pipe_mask)
1704 {
1705 	struct intel_display *display = &dev_priv->display;
1706 	enum pipe pipe;
1707 
1708 	spin_lock_irq(&dev_priv->irq_lock);
1709 
1710 	if (!intel_irqs_enabled(dev_priv)) {
1711 		spin_unlock_irq(&dev_priv->irq_lock);
1712 		return;
1713 	}
1714 
1715 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
1716 		intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
1717 
1718 	spin_unlock_irq(&dev_priv->irq_lock);
1719 
1720 	/* make sure we're done processing display irqs */
1721 	intel_synchronize_irq(dev_priv);
1722 }
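
/*
 * gen8_irq_power_well_post_enable()/gen8_irq_power_well_pre_disable() above
 * are expected to be called by the display power well code when a pipe's
 * power well is turned on/off: the per-pipe IMR/IER contents are lost with
 * the power well, so they are re-programmed after enabling and quiesced
 * before the power goes away.
 */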
1723 
1724 /*
1725  * SDEIER is also touched by the interrupt handler to work around missed PCH
1726  * interrupts. Hence we can't update it after the interrupt handler is enabled -
1727  * instead we unconditionally enable all PCH interrupt sources here, but then
1728  * only unmask them as needed with SDEIMR.
1729  *
1730  * Note that we currently do this after installing the interrupt handler,
1731  * but before we enable the master interrupt. That should be sufficient
1732  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
1733  * interrupts could still race.
1734  */
1735 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
1736 {
1737 	struct intel_display *display = &dev_priv->display;
1738 	u32 mask;
1739 
1740 	if (HAS_PCH_NOP(dev_priv))
1741 		return;
1742 
1743 	if (HAS_PCH_IBX(dev_priv))
1744 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
1745 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
1746 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
1747 	else
1748 		mask = SDE_GMBUS_CPT;
1749 
1750 	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
1751 }
1752 
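/**
 * valleyview_enable_display_irqs - enable VLV/CHV display interrupts
 * @dev_priv: driver private
 *
 * Mark display interrupts as enabled and, if driver interrupts are already
 * installed, reset and reprogram the display interrupt registers. Caller must
 * hold dev_priv->irq_lock.
 */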
1753 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
1754 {
1755 	lockdep_assert_held(&dev_priv->irq_lock);
1756 
1757 	if (dev_priv->display.irq.vlv_display_irqs_enabled)
1758 		return;
1759 
1760 	dev_priv->display.irq.vlv_display_irqs_enabled = true;
1761 
1762 	if (intel_irqs_enabled(dev_priv)) {
1763 		_vlv_display_irq_reset(dev_priv);
1764 		vlv_display_irq_postinstall(dev_priv);
1765 	}
1766 }
1767 
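/**
 * valleyview_disable_display_irqs - disable VLV/CHV display interrupts
 * @dev_priv: driver private
 *
 * Mark display interrupts as disabled and, if driver interrupts are still
 * installed, reset the display interrupt registers. Caller must hold
 * dev_priv->irq_lock.
 */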
1768 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
1769 {
1770 	lockdep_assert_held(&dev_priv->irq_lock);
1771 
1772 	if (!dev_priv->display.irq.vlv_display_irqs_enabled)
1773 		return;
1774 
1775 	dev_priv->display.irq.vlv_display_irqs_enabled = false;
1776 
1777 	if (intel_irqs_enabled(dev_priv))
1778 		_vlv_display_irq_reset(dev_priv);
1779 }
1780 
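/**
 * ilk_de_irq_postinstall - initialize ILK/SNB/IVB/HSW display interrupts
 * @i915: driver private
 *
 * Program DEIMR/DEIER: interrupts in display_mask are unmasked immediately,
 * while those in extra_mask are only enabled in DEIER and get unmasked on
 * demand. Also sets up the PCH (SDE) interrupt registers via
 * ibx_irq_postinstall().
 */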
1781 void ilk_de_irq_postinstall(struct drm_i915_private *i915)
1782 {
1783 	struct intel_display *display = &i915->display;
1784 	u32 display_mask, extra_mask;
1785 
1786 	if (DISPLAY_VER(i915) >= 7) {
1787 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
1788 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
1789 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
1790 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
1791 			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
1792 			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
1793 			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
1794 			      DE_DP_A_HOTPLUG_IVB);
1795 	} else {
1796 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1797 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
1798 				DE_PIPEA_CRC_DONE | DE_POISON);
1799 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
1800 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
1801 			      DE_PLANE_FLIP_DONE(PLANE_A) |
1802 			      DE_PLANE_FLIP_DONE(PLANE_B) |
1803 			      DE_DP_A_HOTPLUG);
1804 	}
1805 
1806 	if (IS_HASWELL(i915)) {
1807 		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
1808 		display_mask |= DE_EDP_PSR_INT_HSW;
1809 	}
1810 
1811 	if (IS_IRONLAKE_M(i915))
1812 		extra_mask |= DE_PCU_EVENT;
1813 
1814 	i915->irq_mask = ~display_mask;
1815 
1816 	ibx_irq_postinstall(i915);
1817 
1818 	intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
1819 				    display_mask | extra_mask);
1820 }
1821 
1822 static void mtp_irq_postinstall(struct drm_i915_private *i915);
1823 static void icp_irq_postinstall(struct drm_i915_private *i915);
1824 
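/**
 * gen8_de_irq_postinstall - initialize BDW+ display engine interrupts
 * @dev_priv: driver private
 *
 * Set up the DE pipe, port, misc and HPD interrupt registers, as well as the
 * south display (PCH or PICA) interrupts. Pipe interrupt registers are only
 * programmed for pipes whose power well is currently enabled.
 */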
1825 void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
1826 {
1827 	struct intel_display *display = &dev_priv->display;
1828 
1829 	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
1830 		GEN8_PIPE_CDCLK_CRC_DONE;
1831 	u32 de_pipe_enables;
1832 	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
1833 	u32 de_port_enables;
1834 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
1835 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
1836 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
1837 	enum pipe pipe;
1838 
1839 	if (!HAS_DISPLAY(dev_priv))
1840 		return;
1841 
1842 	if (DISPLAY_VER(dev_priv) >= 14)
1843 		mtp_irq_postinstall(dev_priv);
1844 	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
1845 		icp_irq_postinstall(dev_priv);
1846 	else if (HAS_PCH_SPLIT(dev_priv))
1847 		ibx_irq_postinstall(dev_priv);
1848 
1849 	if (DISPLAY_VER(dev_priv) < 11)
1850 		de_misc_masked |= GEN8_DE_MISC_GSE;
1851 
1852 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
1853 		de_port_masked |= BXT_DE_PORT_GMBUS;
1854 
1855 	if (DISPLAY_VER(dev_priv) >= 14) {
1856 		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
1857 				  XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
1858 	} else if (DISPLAY_VER(dev_priv) >= 11) {
1859 		enum port port;
1860 
1861 		if (intel_bios_is_dsi_present(display, &port))
1862 			de_port_masked |= DSI0_TE | DSI1_TE;
1863 	}
1864 
1865 	if (HAS_DBUF_OVERLAP_DETECTION(display))
1866 		de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;
1867 
1868 	if (HAS_DSB(dev_priv))
1869 		de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
1870 			GEN12_DSB_INT(INTEL_DSB_1) |
1871 			GEN12_DSB_INT(INTEL_DSB_2);
1872 
1873 	de_pipe_enables = de_pipe_masked |
1874 		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
1875 		gen8_de_pipe_flip_done_mask(dev_priv);
1876 
1877 	de_port_enables = de_port_masked;
1878 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
1879 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
1880 	else if (IS_BROADWELL(dev_priv))
1881 		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
1882 
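	/*
	 * PSR interrupts are reported per transcoder from display version 12
	 * onwards; older platforms use the single EDP_PSR_IIR register.
	 */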
1883 	if (DISPLAY_VER(dev_priv) >= 12) {
1884 		enum transcoder trans;
1885 
1886 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
1887 			enum intel_display_power_domain domain;
1888 
1889 			domain = POWER_DOMAIN_TRANSCODER(trans);
1890 			if (!intel_display_power_is_enabled(dev_priv, domain))
1891 				continue;
1892 
1893 			intel_display_irq_regs_assert_irr_is_zero(display,
1894 								  TRANS_PSR_IIR(dev_priv, trans));
1895 		}
1896 	} else {
1897 		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
1898 	}
1899 
1900 	for_each_pipe(dev_priv, pipe) {
1901 		dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
1902 
1903 		if (intel_display_power_is_enabled(dev_priv,
1904 						   POWER_DOMAIN_PIPE(pipe)))
1905 			intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
1906 						    dev_priv->display.irq.de_irq_mask[pipe],
1907 						    de_pipe_enables);
1908 	}
1909 
1910 	intel_display_irq_regs_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked,
1911 				    de_port_enables);
1912 	intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
1913 				    de_misc_masked);
1914 
1915 	if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
1916 		u32 de_hpd_masked = 0;
1917 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
1918 				     GEN11_DE_TBT_HOTPLUG_MASK;
1919 
1920 		intel_display_irq_regs_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
1921 					    de_hpd_enables);
1922 	}
1923 }
1924 
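/* Display version 14+: set up PICA HPD and south display (SDE) interrupts */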
1925 static void mtp_irq_postinstall(struct drm_i915_private *i915)
1926 {
1927 	struct intel_display *display = &i915->display;
1928 	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
1929 	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
1930 	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
1931 			     XELPDP_TBT_HOTPLUG_MASK;
1932 
1933 	intel_display_irq_regs_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
1934 				    de_hpd_enables);
1935 
1936 	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
1937 }
1938 
1939 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
1940 {
1941 	struct intel_display *display = &dev_priv->display;
1942 	u32 mask = SDE_GMBUS_ICP;
1943 
1944 	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
1945 }
1946 
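/**
 * gen11_de_irq_postinstall - initialize display version 11+ display interrupts
 * @dev_priv: driver private
 *
 * Run the gen8 display interrupt setup and then enable the display interrupt
 * output with GEN11_DISPLAY_INT_CTL.
 */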
1947 void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
1948 {
1949 	struct intel_display *display = &dev_priv->display;
1950 
1951 	if (!HAS_DISPLAY(dev_priv))
1952 		return;
1953 
1954 	gen8_de_irq_postinstall(dev_priv);
1955 
1956 	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
1957 }
1958 
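/**
 * dg1_de_irq_postinstall - initialize DG1+ display interrupts
 * @i915: driver private
 *
 * Same sequence as gen11: gen8 display interrupt setup followed by enabling
 * the display interrupt output with GEN11_DISPLAY_INT_CTL.
 */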
1959 void dg1_de_irq_postinstall(struct drm_i915_private *i915)
1960 {
1961 	struct intel_display *display = &i915->display;
1962 
1963 	if (!HAS_DISPLAY(i915))
1964 		return;
1965 
1966 	gen8_de_irq_postinstall(i915);
1967 	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
1968 }
1969 
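/**
 * intel_display_irq_init - early display interrupt init
 * @i915: driver private
 *
 * Enable immediate vblank disabling, initialize hotplug interrupt handling and
 * set up the vblank_dc_work worker.
 */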
1970 void intel_display_irq_init(struct drm_i915_private *i915)
1971 {
1972 	i915->drm.vblank_disable_immediate = true;
1973 
1974 	intel_hotplug_irq_init(i915);
1975 
1976 	INIT_WORK(&i915->display.irq.vblank_dc_work,
1977 		  intel_display_vblank_dc_work);
1978 }
1979