// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_pipe_crc_regs.h"
#include "intel_plane.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_uncore.h"

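/*
 * Wrap the IMR/IER/IIR accesses in a DMC wakelock get/put so the
 * registers remain accessible while the DMC is otherwise allowed to
 * enter its low-power state.
 */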
static void
intel_display_irq_regs_init(struct intel_display *display, struct i915_irq_regs regs,
			    u32 imr_val, u32 ier_val)
{
	intel_dmc_wl_get(display, regs.imr);
	intel_dmc_wl_get(display, regs.ier);
	intel_dmc_wl_get(display, regs.iir);

	gen2_irq_init(to_intel_uncore(display->drm), regs, imr_val, ier_val);

	intel_dmc_wl_put(display, regs.iir);
	intel_dmc_wl_put(display, regs.ier);
	intel_dmc_wl_put(display, regs.imr);
}

static void
intel_display_irq_regs_reset(struct intel_display *display, struct i915_irq_regs regs)
{
	intel_dmc_wl_get(display, regs.imr);
	intel_dmc_wl_get(display, regs.ier);
	intel_dmc_wl_get(display, regs.iir);

	gen2_irq_reset(to_intel_uncore(display->drm), regs);

	intel_dmc_wl_put(display, regs.iir);
	intel_dmc_wl_put(display, regs.ier);
	intel_dmc_wl_put(display, regs.imr);
}

static void
intel_display_irq_regs_assert_irr_is_zero(struct intel_display *display, i915_reg_t reg)
{
	intel_dmc_wl_get(display, reg);

	gen2_assert_iir_is_zero(to_intel_uncore(display->drm), reg);

	intel_dmc_wl_put(display, reg);
}

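/*
 * Table-driven pipe fault reporting: each platform supplies a
 * zero-terminated array of handlers, intel_pipe_fault_irq_handler()
 * clears every fault bit that a handler claims, and whatever is
 * left over is flagged as an unreported fault.
 */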
struct pipe_fault_handler {
	bool (*handle)(struct intel_crtc *crtc, enum plane_id plane_id);
	u32 fault;
	enum plane_id plane_id;
};

static bool handle_plane_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_plane_error error = {};
	struct intel_plane *plane;

	plane = intel_crtc_get_plane(crtc, plane_id);
	if (!plane || !plane->capture_error)
		return false;

	plane->capture_error(crtc, plane, &error);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s][PLANE:%d:%s] fault (CTL=0x%x, SURF=0x%x, SURFLIVE=0x%x)\n",
			    crtc->base.base.id, crtc->base.name,
			    plane->base.base.id, plane->base.name,
			    error.ctl, error.surf, error.surflive);

	return true;
}

static void intel_pipe_fault_irq_handler(struct intel_display *display,
					 const struct pipe_fault_handler *handlers,
					 enum pipe pipe, u32 fault_errors)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	const struct pipe_fault_handler *handler;

	for (handler = handlers; handler && handler->fault; handler++) {
		if ((fault_errors & handler->fault) == 0)
			continue;

		if (handler->handle(crtc, handler->plane_id))
			fault_errors &= ~handler->fault;
	}

	drm_WARN_ONCE(display->drm, fault_errors,
		      "[CRTC:%d:%s] unreported faults 0x%x\n",
		      crtc->base.base.id, crtc->base.name, fault_errors);
}

static void
intel_handle_vblank(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

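/*
 * IMR semantics used by the *_update_*_irq() helpers below: a set IMR
 * bit masks (disables) the interrupt. For example, with
 * interrupt_mask=0x5 and enabled_irq_mask=0x1 the new IMR value clears
 * bit 0 (enabled) and sets bit 2 (disabled), leaving bit 1 untouched:
 *
 *	new_val = (old_val & ~0x5) | (~0x1 & 0x5);
 */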
/**
 * ilk_update_display_irq - update DEIMR
 * @display: display device
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct intel_display *display,
			    u32 interrupt_mask, u32 enabled_irq_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 new_val;

	lockdep_assert_held(&display->irq.lock);
	drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = display->irq.ilk_de_imr_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != display->irq.ilk_de_imr_mask &&
	    !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
		display->irq.ilk_de_imr_mask = new_val;
		intel_de_write(display, DEIMR, display->irq.ilk_de_imr_mask);
		intel_de_posting_read(display, DEIMR);
	}
}

void ilk_enable_display_irq(struct intel_display *display, u32 bits)
{
	ilk_update_display_irq(display, bits, bits);
}

void ilk_disable_display_irq(struct intel_display *display, u32 bits)
{
	ilk_update_display_irq(display, bits, 0);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @display: display device
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_port_irq(struct intel_display *display,
			 u32 interrupt_mask, u32 enabled_irq_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&display->irq.lock);

	drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_de_read(display, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_de_write(display, GEN8_DE_PORT_IMR, new_val);
		intel_de_posting_read(display, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @display: display device
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct intel_display *display,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 new_val;

	lockdep_assert_held(&display->irq.lock);

	drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = display->irq.de_pipe_imr_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != display->irq.de_pipe_imr_mask[pipe]) {
		display->irq.de_pipe_imr_mask[pipe] = new_val;
		intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_pipe_imr_mask[pipe]);
		intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct intel_display *display,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(display, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct intel_display *display,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(display, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @display: display device
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct intel_display *display,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 sdeimr = intel_de_read(display, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&display->irq.lock);

	if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_de_write(display, SDEIMR, sdeimr);
	intel_de_posting_read(display, SDEIMR);
}

void ibx_enable_display_interrupt(struct intel_display *display, u32 bits)
{
	ibx_display_interrupt_update(display, bits, bits);
}

void ibx_disable_display_interrupt(struct intel_display *display, u32 bits)
{
	ibx_display_interrupt_update(display, bits, 0);
}

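/*
 * PIPESTAT packs the interrupt enable bits in the upper 16 bits and the
 * corresponding status bits in the lower 16 bits, hence the default
 * enable_mask of status_mask << 16. The handful of bits that don't
 * follow that 1:1 layout (e.g. the VLV sprite flip done bits) are
 * fixed up below.
 */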
u32 i915_pipestat_enable_mask(struct intel_display *display,
			      enum pipe pipe)
{
	u32 status_mask = display->irq.pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&display->irq.lock);

	if (DISPLAY_VER(display) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(display->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(display->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(display->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct intel_display *display,
			  enum pipe pipe, u32 status_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	i915_reg_t reg = PIPESTAT(display, pipe);
	u32 enable_mask;

	drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&display->irq.lock);
	drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));

	if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	display->irq.pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(display, pipe);

	intel_de_write(display, reg, enable_mask | status_mask);
	intel_de_posting_read(display, reg);
}

void i915_disable_pipestat(struct intel_display *display,
			   enum pipe pipe, u32 status_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	i915_reg_t reg = PIPESTAT(display, pipe);
	u32 enable_mask;

	drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&display->irq.lock);
	drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));

	if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	display->irq.pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(display, pipe);

	intel_de_write(display, reg, enable_mask | status_mask);
	intel_de_posting_read(display, reg);
}

static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
{
	if (display->platform.i85x)
		return true;

	if (display->platform.pineview)
		return true;

	return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile;
}

/* enable ASLE pipestat for OpRegion */
static void i915_enable_asle_pipestat(struct intel_display *display)
{
	if (!intel_opregion_asle_present(display))
		return;

	if (!i915_has_legacy_blc_interrupt(display))
		return;

	spin_lock_irq(&display->irq.lock);

	i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(display) >= 4)
		i915_enable_pipestat(display, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&display->irq.lock);
}

#if IS_ENABLED(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct intel_display *display,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct intel_display *display,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct intel_display *display,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	spin_lock(&display->drm->event_lock);

	if (crtc->flip_done_event) {
		trace_intel_crtc_flip_done(crtc);
		drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
		crtc->flip_done_event = NULL;
	}

	spin_unlock(&display->drm->event_lock);
}

static void hsw_pipe_crc_irq_handler(struct intel_display *display,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(display, pipe,
				     intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct intel_display *display,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(display, pipe,
				     intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_de_read(display, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct intel_display *display,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(display) >= 3)
		res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
		res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(display, pipe,
				     intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)),
				     intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)),
				     intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct intel_display *display)
{
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		intel_de_write(display,
			       PIPESTAT(display, pipe),
			       PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);

		display->irq.pipestat_irq_mask[pipe] = 0;
	}
}

void i9xx_pipestat_irq_ack(struct intel_display *display,
			   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&display->irq.lock);

	if ((display->platform.valleyview || display->platform.cherryview) &&
	    !display->irq.vlv_display_irqs_enabled) {
		spin_unlock(&display->irq.lock);
		return;
	}

	for_each_pipe(display, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= display->irq.pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(display, pipe);
		pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(display, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_de_write(display, reg, pipe_stats[pipe]);
			intel_de_write(display, reg, enable_mask);
		}
	}
	spin_unlock(&display->irq.lock);
}

void i915_pipestat_irq_handler(struct intel_display *display,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(display, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(display, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(display);
}

void i965_pipestat_irq_handler(struct intel_display *display,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(display, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(display, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(display);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(display);
}

void valleyview_pipestat_irq_handler(struct intel_display *display,
				     u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(display, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(display, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(display, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(display);
}

static void ibx_irq_handler(struct intel_display *display, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(display, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		drm_dbg(display->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		intel_dp_aux_irq_handler(display);

	if (pch_iir & SDE_GMBUS)
		intel_gmbus_irq_handler(display);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(display->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(display->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(display->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(display, pipe)
			drm_dbg(display->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_de_read(display, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		drm_dbg(display->drm,
			"PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(display, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(display, PIPE_B);
}

static u32 ivb_err_int_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return ERR_INT_SPRITE_A_FAULT |
			ERR_INT_PRIMARY_A_FAULT |
			ERR_INT_CURSOR_A_FAULT;
	case PIPE_B:
		return ERR_INT_SPRITE_B_FAULT |
			ERR_INT_PRIMARY_B_FAULT |
			ERR_INT_CURSOR_B_FAULT;
	case PIPE_C:
		return ERR_INT_SPRITE_C_FAULT |
			ERR_INT_PRIMARY_C_FAULT |
			ERR_INT_CURSOR_C_FAULT;
	default:
		return 0;
	}
}

static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
	{ .fault = ERR_INT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = ERR_INT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = ERR_INT_SPRITE_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static void ivb_err_int_handler(struct intel_display *display)
{
	u32 err_int = intel_de_read(display, GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(display->drm, "Poison interrupt\n");

	if (err_int & ERR_INT_INVALID_GTT_PTE)
		drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");

	if (err_int & ERR_INT_INVALID_PTE_DATA)
		drm_err_ratelimited(display->drm, "Invalid PTE data\n");

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (display->platform.ivybridge)
				ivb_pipe_crc_irq_handler(display, pipe);
			else
				hsw_pipe_crc_irq_handler(display, pipe);
		}

		fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, ivb_pipe_fault_handlers,
						     pipe, fault_errors);
	}

	intel_de_write(display, GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct intel_display *display)
{
	u32 serr_int = intel_de_read(display, SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(display->drm, "PCH poison interrupt\n");

	for_each_pipe(display, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(display, pipe);

	intel_de_write(display, SERR_INT, serr_int);
}

static void cpt_irq_handler(struct intel_display *display, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(display, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(display->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		intel_dp_aux_irq_handler(display);

	if (pch_iir & SDE_GMBUS_CPT)
		intel_gmbus_irq_handler(display);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(display->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(display->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(display, pipe)
			drm_dbg(display->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_de_read(display, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(display);
}

static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return GTT_FAULT_SPRITE_A_FAULT |
			GTT_FAULT_PRIMARY_A_FAULT |
			GTT_FAULT_CURSOR_A_FAULT;
	case PIPE_B:
		return GTT_FAULT_SPRITE_B_FAULT |
			GTT_FAULT_PRIMARY_B_FAULT |
			GTT_FAULT_CURSOR_B_FAULT;
	default:
		return 0;
	}
}

static const struct pipe_fault_handler ilk_pipe_fault_handlers[] = {
	{ .fault = GTT_FAULT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GTT_FAULT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GTT_FAULT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GTT_FAULT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GTT_FAULT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = GTT_FAULT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static void ilk_gtt_fault_irq_handler(struct intel_display *display)
{
	enum pipe pipe;
	u32 gtt_fault;

	gtt_fault = intel_de_read(display, ILK_GTT_FAULT);
	intel_de_write(display, ILK_GTT_FAULT, gtt_fault);

	if (gtt_fault & GTT_FAULT_INVALID_GTT_PTE)
		drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");

	if (gtt_fault & GTT_FAULT_INVALID_PTE_DATA)
		drm_err_ratelimited(display->drm, "Invalid PTE data\n");

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		fault_errors = gtt_fault & ilk_gtt_fault_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, ilk_pipe_fault_handlers,
						     pipe, fault_errors);
	}
}

static void _ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(display, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		intel_dp_aux_irq_handler(display);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(display);

	if (de_iir & DE_POISON)
		drm_err(display->drm, "Poison interrupt\n");

	if (de_iir & DE_GTT_FAULT)
		ilk_gtt_fault_irq_handler(display);

	for_each_pipe(display, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(display, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(display, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(display, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_de_read(display, SDEIIR);

		if (HAS_PCH_CPT(display))
			cpt_irq_handler(display, pch_iir);
		else
			ibx_irq_handler(display, pch_iir);

		/* clear the PCH hotplug event before clearing the CPU irq */
		intel_de_write(display, SDEIIR, pch_iir);
	}

	if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT)
		ilk_display_rps_irq_handler(display);
}

static void _ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(display, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(display);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		struct intel_encoder *encoder;

		for_each_intel_encoder_with_psr(display->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
			u32 psr_iir;

			psr_iir = intel_de_rmw(display, EDP_PSR_IIR, 0, 0);
			intel_psr_irq_handler(intel_dp, psr_iir);
			break;
		}
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		intel_dp_aux_irq_handler(display);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(display);

	for_each_pipe(display, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(display, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(display, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(display) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_de_read(display, SDEIIR);

		cpt_irq_handler(display, pch_iir);

		/* clear the PCH hotplug event before clearing the CPU irq */
		intel_de_write(display, SDEIIR, pch_iir);
	}
}

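/*
 * Typical usage from the top-level interrupt handler (a sketch; the
 * actual caller lives outside this file):
 *
 *	ilk_display_irq_master_disable(display, &de_ier, &sde_ier);
 *	... read and clear the IIRs ...
 *	ilk_display_irq_master_enable(display, de_ier, sde_ier);
 */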
void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier)
{
	/* disable master interrupt before clearing iir */
	*de_ier = intel_de_read_fw(display, DEIER);
	intel_de_write_fw(display, DEIER, *de_ier & ~DE_MASTER_IRQ_CONTROL);

	/*
	 * Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be able
	 * to process them after we restore SDEIER (as soon as we restore it,
	 * we'll get an interrupt if SDEIIR still has something to process due
	 * to its back queue).
	 */
	if (!HAS_PCH_NOP(display)) {
		*sde_ier = intel_de_read_fw(display, SDEIER);
		intel_de_write_fw(display, SDEIER, 0);
	} else {
		*sde_ier = 0;
	}
}

void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier)
{
	intel_de_write_fw(display, DEIER, de_ier);

	if (sde_ier)
		intel_de_write_fw(display, SDEIER, sde_ier);
}

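/*
 * Returns true if any DE interrupt was pending and handled, so the
 * caller can tell whether the interrupt was ours.
 */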
bool ilk_display_irq_handler(struct intel_display *display)
{
	u32 de_iir;
	bool handled = false;

	de_iir = intel_de_read_fw(display, DEIIR);
	if (de_iir) {
		intel_de_write_fw(display, DEIIR, de_iir);
		if (DISPLAY_VER(display) >= 7)
			_ivb_display_irq_handler(display, de_iir);
		else
			_ilk_display_irq_handler(display, de_iir);
		handled = true;
	}

	return handled;
}

static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
	u32 mask;

	if (DISPLAY_VER(display) >= 20)
		return 0;
	else if (DISPLAY_VER(display) >= 14)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB;
	else if (DISPLAY_VER(display) >= 13)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			XELPD_DE_PORT_AUX_DDID |
			XELPD_DE_PORT_AUX_DDIE |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4;
	else if (DISPLAY_VER(display) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(display) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(display) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}

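/*
 * The fault bits returned here need to stay in sync with the
 * per-platform handler tables returned by gen8_pipe_fault_handlers()
 * below; any bit without a matching handler ends up in the
 * "unreported faults" warning.
 */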
static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 20)
		return MTL_PLANE_ATS_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 14)
		return MTL_PIPEDMC_ATS_FAULT |
			MTL_PLANE_ATS_FAULT |
			GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 12)
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 11)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else
		return GEN8_PIPE_CURSOR_FAULT |
			GEN8_PIPE_SPRITE_FAULT |
			GEN8_PIPE_PRIMARY_FAULT;
}

static bool handle_plane_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PLANE ATS fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

static bool handle_pipedmc_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PIPEDMC ATS fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

static bool handle_pipedmc_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PIPEDMC fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

static const struct pipe_fault_handler mtl_pipe_fault_handlers[] = {
	{ .fault = MTL_PLANE_ATS_FAULT,     .handle = handle_plane_ats_fault, },
	{ .fault = MTL_PIPEDMC_ATS_FAULT,   .handle = handle_pipedmc_ats_fault, },
	{ .fault = GEN12_PIPEDMC_FAULT,     .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler tgl_pipe_fault_handlers[] = {
	{ .fault = GEN12_PIPEDMC_FAULT,     .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler icl_pipe_fault_handlers[] = {
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler skl_pipe_fault_handlers[] = {
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler bdw_pipe_fault_handlers[] = {
	{ .fault = GEN8_PIPE_SPRITE_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GEN8_PIPE_PRIMARY_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GEN8_PIPE_CURSOR_FAULT,  .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

static const struct pipe_fault_handler *
gen8_pipe_fault_handlers(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 14)
		return mtl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 12)
		return tgl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 11)
		return icl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 9)
		return skl_pipe_fault_handlers;
	else
		return bdw_pipe_fault_handlers;
}

static void intel_pmdemand_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->pmdemand.waitqueue);
}

static void
gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
	bool found = false;

	if (HAS_DBUF_OVERLAP_DETECTION(display)) {
		if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) {
			drm_warn(display->drm, "DBuf overlap detected\n");
			found = true;
		}
	}

	if (DISPLAY_VER(display) >= 14) {
		if (iir & (XELPDP_PMDEMAND_RSP |
			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
				drm_dbg(display->drm,
					"Error waiting for Punit PM Demand Response\n");

			intel_pmdemand_irq_handler(display);
			found = true;
		}

		if (iir & XELPDP_RM_TIMEOUT) {
			u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);

			drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
			found = true;
		}
	} else if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(display);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(display->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			if (DISPLAY_VER(display) >= 12)
				iir_reg = TRANS_PSR_IIR(display,
							intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			psr_iir = intel_de_rmw(display, iir_reg, 0, 0);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior to GEN12 there is only one EDP PSR */
			if (DISPLAY_VER(display) < 12)
				break;
		}
	}

	if (!found)
		drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}

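/*
 * TE source to transcoder mapping implemented below:
 *
 *	DSI0_TE			-> PORT_A -> TRANSCODER_DSI_0
 *	DSI1_TE, dual link	-> PORT_A -> TRANSCODER_DSI_0
 *	DSI1_TE, single link	-> PORT_B -> TRANSCODER_DSI_1
 */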
static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val;

	/*
	 * In case of dual link, TE comes from DSI_1,
	 * so check whether dual link is enabled
	 */
	val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * if dual link is enabled, then read DSI_0
	 * transcoder registers
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
		PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(display->drm, "DSI transcoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(display->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(display, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}

static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(display, SDEIIR);
	if (!*pch_iir)
		return;

	/*
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		drm_WARN_ON(display->drm, INTEL_PCH_TYPE(display) < PCH_MTL);

		pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
		intel_de_write(display, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(display, SDEIIR, *pch_iir);

	if (pica_ier)
		intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}

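/*
 * Fan out the DE master control bits: DE misc, then (gen11+) DE HPD,
 * then DE port, then the per-pipe IIRs, and finally the PCH/SDE
 * interrupt. Each sub-IIR is read and acked before being dispatched;
 * a master control bit set with an empty sub-IIR is reported as the
 * master control interrupt "lying".
 */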
void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_de_read(display, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_de_write(display, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(display, iir);
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_de_read(display, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_de_write(display, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(display, iir);
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_de_read(display, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_de_write(display, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(display)) {
				intel_dp_aux_irq_handler(display);
				found = true;
			}

			if (display->platform.geminilake || display->platform.broxton) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(display, hotplug_trigger);
					found = true;
				}
			} else if (display->platform.broadwell) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(display, hotplug_trigger);
					found = true;
				}
			}

			if ((display->platform.geminilake || display->platform.broxton) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(display);
				found = true;
			}

			if (DISPLAY_VER(display) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(display, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err_ratelimited(display->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE PIPE %c)!\n",
					    pipe_name(pipe));
			continue;
		}

		intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(display, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(display))
			flip_done_handler(display, pipe);

		if (HAS_DSB(display)) {
			if (iir & GEN12_DSB_INT(INTEL_DSB_0))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);

			if (iir & GEN12_DSB_INT(INTEL_DSB_1))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);

			if (iir & GEN12_DSB_INT(INTEL_DSB_2))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
		}

		if (HAS_PIPEDMC(display) && iir & GEN12_PIPEDMC_INTERRUPT)
			intel_pipedmc_irq_handler(display, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(display, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(display);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display,
						     gen8_pipe_fault_handlers(display),
						     pipe, fault_errors);
	}

	if (HAS_PCH_SPLIT(display) && !HAS_PCH_NOP(display) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		u32 pica_iir;

		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
		if (iir) {
			if (pica_iir)
				xelpdp_pica_irq_handler(display, pica_iir);

			if (INTEL_PCH_TYPE(display) >= PCH_ICP)
				icp_irq_handler(display, iir);
			else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
				spt_irq_handler(display, iir);
			else
				cpt_irq_handler(display, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(display->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}
}

1550 u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
1551 {
1552 	u32 iir;
1553 
1554 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
1555 		return 0;
1556 
1557 	intel_display_rpm_assert_block(display);
1558 
1559 	iir = intel_de_read(display, GEN11_GU_MISC_IIR);
1560 	if (likely(iir))
1561 		intel_de_write(display, GEN11_GU_MISC_IIR, iir);
1562 
1563 	intel_display_rpm_assert_unblock(display);
1564 
1565 	return iir;
1566 }
1567 
1568 void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
1569 {
1570 	if (iir & GEN11_GU_MISC_GSE)
1571 		intel_opregion_asle_intr(display);
1572 }
1573 
1574 void gen11_display_irq_handler(struct intel_display *display)
1575 {
1576 	u32 disp_ctl;
1577 
1578 	intel_display_rpm_assert_block(display);
1579 	/*
1580 	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
1581 	 * for the display related bits.
1582 	 */
1583 	disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);
1584 
1585 	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
1586 	gen8_de_irq_handler(display, disp_ctl);
1587 	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
1588 
1589 	intel_display_rpm_assert_unblock(display);
1590 }
1591 
1592 static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
1593 {
1594 	lockdep_assert_held(&display->drm->vblank_time_lock);
1595 
1596 	/*
1597 	 * Vblank/CRC interrupts fail to wake the device up from C2+.
1598 	 * Disabling render clock gating during C-states avoids
1599 	 * the problem. There is a small power cost so we do this
1600 	 * only when vblank/CRC interrupts are actually enabled.
1601 	 */
1602 	if (display->irq.vblank_enabled++ == 0)
1603 		intel_de_write(display, SCPD0,
1604 			       _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
1605 }
1606 
1607 static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
1608 {
1609 	lockdep_assert_held(&display->drm->vblank_time_lock);
1610 
1611 	if (--display->irq.vblank_enabled == 0)
1612 		intel_de_write(display, SCPD0,
1613 			       _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
1614 }
1615 
1616 void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
1617 {
1618 	spin_lock_irq(&display->drm->vblank_time_lock);
1619 
1620 	if (enable)
1621 		i915gm_irq_cstate_wa_enable(display);
1622 	else
1623 		i915gm_irq_cstate_wa_disable(display);
1624 
1625 	spin_unlock_irq(&display->drm->vblank_time_lock);
1626 }
1627 
1628 int i8xx_enable_vblank(struct drm_crtc *crtc)
1629 {
1630 	struct intel_display *display = to_intel_display(crtc->dev);
1631 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1632 	unsigned long irqflags;
1633 
1634 	spin_lock_irqsave(&display->irq.lock, irqflags);
1635 	i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1636 	spin_unlock_irqrestore(&display->irq.lock, irqflags);
1637 
1638 	return 0;
1639 }
1640 
1641 void i8xx_disable_vblank(struct drm_crtc *crtc)
1642 {
1643 	struct intel_display *display = to_intel_display(crtc->dev);
1644 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1645 	unsigned long irqflags;
1646 
1647 	spin_lock_irqsave(&display->irq.lock, irqflags);
1648 	i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1649 	spin_unlock_irqrestore(&display->irq.lock, irqflags);
1650 }
1651 
1652 int i915gm_enable_vblank(struct drm_crtc *crtc)
1653 {
1654 	struct intel_display *display = to_intel_display(crtc->dev);
1655 
1656 	i915gm_irq_cstate_wa_enable(display);
1657 
1658 	return i8xx_enable_vblank(crtc);
1659 }
1660 
1661 void i915gm_disable_vblank(struct drm_crtc *crtc)
1662 {
1663 	struct intel_display *display = to_intel_display(crtc->dev);
1664 
1665 	i8xx_disable_vblank(crtc);
1666 
1667 	i915gm_irq_cstate_wa_disable(display);
1668 }
1669 
1670 int i965_enable_vblank(struct drm_crtc *crtc)
1671 {
1672 	struct intel_display *display = to_intel_display(crtc->dev);
1673 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1674 	unsigned long irqflags;
1675 
1676 	spin_lock_irqsave(&display->irq.lock, irqflags);
1677 	i915_enable_pipestat(display, pipe,
1678 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
1679 	spin_unlock_irqrestore(&display->irq.lock, irqflags);
1680 
1681 	return 0;
1682 }
1683 
1684 void i965_disable_vblank(struct drm_crtc *crtc)
1685 {
1686 	struct intel_display *display = to_intel_display(crtc->dev);
1687 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1688 	unsigned long irqflags;
1689 
1690 	spin_lock_irqsave(&display->irq.lock, irqflags);
1691 	i915_disable_pipestat(display, pipe,
1692 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
1693 	spin_unlock_irqrestore(&display->irq.lock, irqflags);
1694 }
1695 
1696 int ilk_enable_vblank(struct drm_crtc *crtc)
1697 {
1698 	struct intel_display *display = to_intel_display(crtc->dev);
1699 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1700 	unsigned long irqflags;
1701 	u32 bit = DISPLAY_VER(display) >= 7 ?
1702 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
1703 
1704 	spin_lock_irqsave(&display->irq.lock, irqflags);
1705 	ilk_enable_display_irq(display, bit);
1706 	spin_unlock_irqrestore(&display->irq.lock, irqflags);
1707 
1708 	/* Even though there is no DMC, the frame counter can get stuck
1709 	 * when PSR is active, as no frames are generated.
1710 	 */
1711 	if (HAS_PSR(display))
1712 		drm_crtc_vblank_restore(crtc);
1713 
1714 	return 0;
1715 }
1716 
1717 void ilk_disable_vblank(struct drm_crtc *crtc)
1718 {
1719 	struct intel_display *display = to_intel_display(crtc->dev);
1720 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1721 	unsigned long irqflags;
1722 	u32 bit = DISPLAY_VER(display) >= 7 ?
1723 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
1724 
1725 	spin_lock_irqsave(&display->irq.lock, irqflags);
1726 	ilk_disable_display_irq(display, bit);
1727 	spin_unlock_irqrestore(&display->irq.lock, irqflags);
1728 }
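
/*
 * Note: the DE interrupt bit layout changed on IVB (display version 7),
 * hence the DE_PIPE_VBLANK_IVB() vs DE_PIPE_VBLANK() selection above;
 * the enable and disable paths are otherwise shared between ILK/SNB and
 * IVB/HSW.
 */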
1729 
1730 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
1731 				   bool enable)
1732 {
1733 	struct intel_display *display = to_intel_display(intel_crtc);
1734 	enum port port;
1735 
1736 	if (!(intel_crtc->mode_flags &
1737 	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
1738 		return false;
1739 
1740 	/* For dual-link cases we consider TE from the slave */
1741 	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
1742 		port = PORT_B;
1743 	else
1744 		port = PORT_A;
1745 
1746 	intel_de_rmw(display, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, enable ? 0 : DSI_TE_EVENT);
1747 
1748 	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
1749 
1750 	return true;
1751 }
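
/*
 * A reading of the TE helper above (assumptions flagged as such): when
 * the mode uses DSI TE signalling, vblank handling rides on the TE event
 * instead of the pipe vblank interrupt, so the callers below bail out
 * early when this returns true. The final rmw of DSI_INTR_IDENT_REG with
 * mask and bits both 0 reads the register and writes the value straight
 * back, which, assuming the identity bits are write-1-to-clear like an
 * IIR, acks any TE event that is already pending.
 */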
1752 
1753 static void intel_display_vblank_notify_work(struct work_struct *work)
1754 {
1755 	struct intel_display *display =
1756 		container_of(work, typeof(*display), irq.vblank_notify_work);
1757 	int vblank_enable_count = READ_ONCE(display->irq.vblank_enable_count);
1758 
1759 	intel_psr_notify_vblank_enable_disable(display, vblank_enable_count);
1760 }
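
/*
 * The worker above exists, presumably, so the PSR notification can run
 * in process context rather than under the spinlocks held on the vblank
 * enable/disable paths; the count is sampled with READ_ONCE() since the
 * worker runs concurrently with those paths, and it is only scheduled
 * on the 0 -> 1 and 1 -> 0 count transitions.
 */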
1761 
1762 int bdw_enable_vblank(struct drm_crtc *_crtc)
1763 {
1764 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
1765 	struct intel_display *display = to_intel_display(crtc);
1766 	enum pipe pipe = crtc->pipe;
1767 	unsigned long irqflags;
1768 
1769 	if (gen11_dsi_configure_te(crtc, true))
1770 		return 0;
1771 
1772 	if (crtc->vblank_psr_notify && display->irq.vblank_enable_count++ == 0)
1773 		schedule_work(&display->irq.vblank_notify_work);
1774 
1775 	spin_lock_irqsave(&display->irq.lock, irqflags);
1776 	bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
1777 	spin_unlock_irqrestore(&display->irq.lock, irqflags);
1778 
1779 	/* Even if there is no DMC, the frame counter can get stuck when
1780 	 * PSR is active, as no frames are generated, so only check for PSR.
1781 	 */
1782 	if (HAS_PSR(display))
1783 		drm_crtc_vblank_restore(&crtc->base);
1784 
1785 	return 0;
1786 }
1787 
1788 void bdw_disable_vblank(struct drm_crtc *_crtc)
1789 {
1790 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
1791 	struct intel_display *display = to_intel_display(crtc);
1792 	enum pipe pipe = crtc->pipe;
1793 	unsigned long irqflags;
1794 
1795 	if (gen11_dsi_configure_te(crtc, false))
1796 		return;
1797 
1798 	spin_lock_irqsave(&display->irq.lock, irqflags);
1799 	bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
1800 	spin_unlock_irqrestore(&display->irq.lock, irqflags);
1801 
1802 	if (crtc->vblank_psr_notify && --display->irq.vblank_enable_count == 0)
1803 		schedule_work(&display->irq.vblank_notify_work);
1804 }
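
/*
 * A minimal sketch of the vblank_psr_notify bookkeeping (values are
 * illustrative): with two CRTCs that have vblank_psr_notify set,
 *
 *	enable A:  count 0 -> 1, worker scheduled
 *	enable B:  count 1 -> 2, no work
 *	disable B: count 2 -> 1, no work
 *	disable A: count 1 -> 0, worker scheduled
 *
 * so PSR is only notified when vblank interrupt usage starts and stops
 * globally, not per pipe.
 */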
1805 
1806 static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
1807 {
1808 	switch (pipe) {
1809 	case PIPE_A:
1810 		return SPRITEB_INVALID_GTT_STATUS |
1811 			SPRITEA_INVALID_GTT_STATUS |
1812 			PLANEA_INVALID_GTT_STATUS |
1813 			CURSORA_INVALID_GTT_STATUS;
1814 	case PIPE_B:
1815 		return SPRITED_INVALID_GTT_STATUS |
1816 			SPRITEC_INVALID_GTT_STATUS |
1817 			PLANEB_INVALID_GTT_STATUS |
1818 			CURSORB_INVALID_GTT_STATUS;
1819 	case PIPE_C:
1820 		return SPRITEF_INVALID_GTT_STATUS |
1821 			SPRITEE_INVALID_GTT_STATUS |
1822 			PLANEC_INVALID_GTT_STATUS |
1823 			CURSORC_INVALID_GTT_STATUS;
1824 	default:
1825 		return 0;
1826 	}
1827 }
1828 
1829 static const struct pipe_fault_handler vlv_pipe_fault_handlers[] = {
1830 	{ .fault = SPRITEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
1831 	{ .fault = SPRITEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
1832 	{ .fault = PLANEA_INVALID_GTT_STATUS,  .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
1833 	{ .fault = CURSORA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR,  },
1834 	{ .fault = SPRITED_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
1835 	{ .fault = SPRITEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
1836 	{ .fault = PLANEB_INVALID_GTT_STATUS,  .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
1837 	{ .fault = CURSORB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR,  },
1838 	{ .fault = SPRITEF_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
1839 	{ .fault = SPRITEE_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
1840 	{ .fault = PLANEC_INVALID_GTT_STATUS,  .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
1841 	{ .fault = CURSORC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR,  },
1842 	{}
1843 };
1844 
1845 static void vlv_page_table_error_irq_ack(struct intel_display *display, u32 *dpinvgtt)
1846 {
1847 	u32 status, enable, tmp;
1848 
1849 	tmp = intel_de_read(display, DPINVGTT);
1850 
1851 	enable = tmp >> 16;
1852 	status = tmp & 0xffff;
1853 
1854 	/*
1855 	 * Despite what the docs claim, the status bits seem to get
1856 	 * stuck permanently (similar to the old PGTBL_ER register), so
1857 	 * we have to disable and ignore them once set. They do get
1858 	 * reset if the display power well goes down, so no need to
1859 	 * track the enable mask explicitly.
1860 	 */
1861 	*dpinvgtt = status & enable;
1862 	enable &= ~status;
1863 
1864 	/* customary ack+disable then re-enable to guarantee an edge */
1865 	intel_de_write(display, DPINVGTT, status);
1866 	intel_de_write(display, DPINVGTT, enable << 16);
1867 }
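
/*
 * A worked example of the DPINVGTT split above (value is hypothetical):
 * with tmp = 0x00040004, enable = 0x0004 and status = 0x0004, so
 * *dpinvgtt reports the fault, the matching enable bit is cleared, and
 * the two writes ack the status and re-arm only the remaining enables,
 * leaving the stuck bit disabled from then on.
 */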
1868 
1869 static void vlv_page_table_error_irq_handler(struct intel_display *display, u32 dpinvgtt)
1870 {
1871 	enum pipe pipe;
1872 
1873 	for_each_pipe(display, pipe) {
1874 		u32 fault_errors;
1875 
1876 		fault_errors = dpinvgtt & vlv_dpinvgtt_pipe_fault_mask(pipe);
1877 		if (fault_errors)
1878 			intel_pipe_fault_irq_handler(display, vlv_pipe_fault_handlers,
1879 						     pipe, fault_errors);
1880 	}
1881 }
1882 
1883 void vlv_display_error_irq_ack(struct intel_display *display,
1884 			       u32 *eir, u32 *dpinvgtt)
1885 {
1886 	u32 emr;
1887 
1888 	*eir = intel_de_read(display, VLV_EIR);
1889 
1890 	if (*eir & VLV_ERROR_PAGE_TABLE)
1891 		vlv_page_table_error_irq_ack(display, dpinvgtt);
1892 
1893 	intel_de_write(display, VLV_EIR, *eir);
1894 
1895 	/*
1896 	 * Toggle all EMR bits to make sure we get an edge
1897 	 * in the ISR master error bit if we don't clear
1898 	 * all the EIR bits.
1899 	 */
1900 	emr = intel_de_read(display, VLV_EMR);
1901 	intel_de_write(display, VLV_EMR, 0xffffffff);
1902 	intel_de_write(display, VLV_EMR, emr);
1903 }
1904 
1905 void vlv_display_error_irq_handler(struct intel_display *display,
1906 				   u32 eir, u32 dpinvgtt)
1907 {
1908 	drm_dbg(display->drm, "Master Error, EIR 0x%08x\n", eir);
1909 
1910 	if (eir & VLV_ERROR_PAGE_TABLE)
1911 		vlv_page_table_error_irq_handler(display, dpinvgtt);
1912 }
1913 
1914 static void _vlv_display_irq_reset(struct intel_display *display)
1915 {
1916 	if (display->platform.cherryview)
1917 		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
1918 	else
1919 		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
1920 
1921 	gen2_error_reset(to_intel_uncore(display->drm),
1922 			 VLV_ERROR_REGS);
1923 
1924 	i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
1925 	intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
1926 
1927 	i9xx_pipestat_irq_reset(display);
1928 
1929 	intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
1930 	display->irq.vlv_imr_mask = ~0u;
1931 }
1932 
1933 void vlv_display_irq_reset(struct intel_display *display)
1934 {
1935 	spin_lock_irq(&display->irq.lock);
1936 	if (display->irq.vlv_display_irqs_enabled)
1937 		_vlv_display_irq_reset(display);
1938 	spin_unlock_irq(&display->irq.lock);
1939 }
1940 
1941 void i9xx_display_irq_reset(struct intel_display *display)
1942 {
1943 	if (HAS_HOTPLUG(display)) {
1944 		i915_hotplug_interrupt_update(display, 0xffffffff, 0);
1945 		intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
1946 	}
1947 
1948 	i9xx_pipestat_irq_reset(display);
1949 }
1950 
1951 u32 i9xx_display_irq_enable_mask(struct intel_display *display)
1952 {
1953 	u32 enable_mask;
1954 
1955 	enable_mask = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1956 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1957 
1958 	if (DISPLAY_VER(display) >= 3)
1959 		enable_mask |= I915_ASLE_INTERRUPT;
1960 
1961 	if (HAS_HOTPLUG(display))
1962 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1963 
1964 	return enable_mask;
1965 }
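
/*
 * Usage sketch (hypothetical caller): the mask returned above is meant
 * to be folded into the device's interrupt enable value during irq
 * postinstall, roughly:
 *
 *	enable_mask |= i9xx_display_irq_enable_mask(display);
 *
 * with the actual IER/IMR programming left to the non-display irq code.
 */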
1966 
1967 void i915_display_irq_postinstall(struct intel_display *display)
1968 {
1969 	/*
1970 	 * Interrupt setup is already guaranteed to be single-threaded; this is
1971 	 * just to make the assert_spin_locked check happy.
1972 	 */
1973 	spin_lock_irq(&display->irq.lock);
1974 	i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
1975 	i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
1976 	spin_unlock_irq(&display->irq.lock);
1977 
1978 	i915_enable_asle_pipestat(display);
1979 }
1980 
1981 void i965_display_irq_postinstall(struct intel_display *display)
1982 {
1983 	/*
1984 	 * Interrupt setup is already guaranteed to be single-threaded; this is
1985 	 * just to make the assert_spin_locked check happy.
1986 	 */
1987 	spin_lock_irq(&display->irq.lock);
1988 	i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
1989 	i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
1990 	i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
1991 	spin_unlock_irq(&display->irq.lock);
1992 
1993 	i915_enable_asle_pipestat(display);
1994 }
1995 
1996 static u32 vlv_error_mask(void)
1997 {
1998 	/* TODO: enable other errors too? */
1999 	return VLV_ERROR_PAGE_TABLE;
2000 }
2001 
2002 static void _vlv_display_irq_postinstall(struct intel_display *display)
2003 {
2004 	u32 pipestat_mask;
2005 	u32 enable_mask;
2006 	enum pipe pipe;
2007 
2008 	if (display->platform.cherryview)
2009 		intel_de_write(display, DPINVGTT,
2010 			       DPINVGTT_STATUS_MASK_CHV |
2011 			       DPINVGTT_EN_MASK_CHV);
2012 	else
2013 		intel_de_write(display, DPINVGTT,
2014 			       DPINVGTT_STATUS_MASK_VLV |
2015 			       DPINVGTT_EN_MASK_VLV);
2016 
2017 	gen2_error_init(to_intel_uncore(display->drm),
2018 			VLV_ERROR_REGS, ~vlv_error_mask());
2019 
2020 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2021 
2022 	i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2023 	for_each_pipe(display, pipe)
2024 		i915_enable_pipestat(display, pipe, pipestat_mask);
2025 
2026 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2027 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2028 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2029 		I915_LPE_PIPE_A_INTERRUPT |
2030 		I915_LPE_PIPE_B_INTERRUPT |
2031 		I915_MASTER_ERROR_INTERRUPT;
2032 
2033 	if (display->platform.cherryview)
2034 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2035 			I915_LPE_PIPE_C_INTERRUPT;
2036 
2037 	drm_WARN_ON(display->drm, display->irq.vlv_imr_mask != ~0u);
2038 
2039 	display->irq.vlv_imr_mask = ~enable_mask;
2040 
2041 	intel_display_irq_regs_init(display, VLV_IRQ_REGS, display->irq.vlv_imr_mask, enable_mask);
2042 }
2043 
2044 void vlv_display_irq_postinstall(struct intel_display *display)
2045 {
2046 	spin_lock_irq(&display->irq.lock);
2047 	if (display->irq.vlv_display_irqs_enabled)
2048 		_vlv_display_irq_postinstall(display);
2049 	spin_unlock_irq(&display->irq.lock);
2050 }
2051 
2052 static void ibx_display_irq_reset(struct intel_display *display)
2053 {
2054 	if (HAS_PCH_NOP(display))
2055 		return;
2056 
2057 	gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);
2058 
2059 	if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
2060 		intel_de_write(display, SERR_INT, 0xffffffff);
2061 }
2062 
2063 void ilk_display_irq_reset(struct intel_display *display)
2064 {
2065 	struct intel_uncore *uncore = to_intel_uncore(display->drm);
2066 
2067 	gen2_irq_reset(uncore, DE_IRQ_REGS);
2068 	display->irq.ilk_de_imr_mask = ~0u;
2069 
2070 	if (DISPLAY_VER(display) == 7)
2071 		intel_de_write(display, GEN7_ERR_INT, 0xffffffff);
2072 
2073 	if (display->platform.haswell) {
2074 		intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
2075 		intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
2076 	}
2077 
2078 	ibx_display_irq_reset(display);
2079 }
2080 
2081 void gen8_display_irq_reset(struct intel_display *display)
2082 {
2083 	enum pipe pipe;
2084 
2085 	if (!HAS_DISPLAY(display))
2086 		return;
2087 
2088 	intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
2089 	intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
2090 
2091 	for_each_pipe(display, pipe)
2092 		if (intel_display_power_is_enabled(display,
2093 						   POWER_DOMAIN_PIPE(pipe)))
2094 			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
2095 
2096 	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
2097 	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
2098 
2099 	if (HAS_PCH_SPLIT(display))
2100 		ibx_display_irq_reset(display);
2101 }
2102 
2103 void gen11_display_irq_reset(struct intel_display *display)
2104 {
2105 	enum pipe pipe;
2106 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
2107 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
2108 
2109 	if (!HAS_DISPLAY(display))
2110 		return;
2111 
2112 	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
2113 
2114 	if (DISPLAY_VER(display) >= 12) {
2115 		enum transcoder trans;
2116 
2117 		for_each_cpu_transcoder_masked(display, trans, trans_mask) {
2118 			enum intel_display_power_domain domain;
2119 
2120 			domain = POWER_DOMAIN_TRANSCODER(trans);
2121 			if (!intel_display_power_is_enabled(display, domain))
2122 				continue;
2123 
2124 			intel_de_write(display,
2125 				       TRANS_PSR_IMR(display, trans),
2126 				       0xffffffff);
2127 			intel_de_write(display,
2128 				       TRANS_PSR_IIR(display, trans),
2129 				       0xffffffff);
2130 		}
2131 	} else {
2132 		intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
2133 		intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
2134 	}
2135 
2136 	for_each_pipe(display, pipe)
2137 		if (intel_display_power_is_enabled(display,
2138 						   POWER_DOMAIN_PIPE(pipe)))
2139 			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
2140 
2141 	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
2142 	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
2143 
2144 	if (DISPLAY_VER(display) >= 14)
2145 		intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
2146 	else
2147 		intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
2148 
2149 	if (INTEL_PCH_TYPE(display) >= PCH_ICP)
2150 		intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
2151 }
2152 
2153 void gen8_irq_power_well_post_enable(struct intel_display *display,
2154 				     u8 pipe_mask)
2155 {
2156 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2157 	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
2158 		gen8_de_pipe_flip_done_mask(display);
2159 	enum pipe pipe;
2160 
2161 	spin_lock_irq(&display->irq.lock);
2162 
2163 	if (!intel_irqs_enabled(dev_priv)) {
2164 		spin_unlock_irq(&display->irq.lock);
2165 		return;
2166 	}
2167 
2168 	for_each_pipe_masked(display, pipe, pipe_mask)
2169 		intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
2170 					    display->irq.de_pipe_imr_mask[pipe],
2171 					    ~display->irq.de_pipe_imr_mask[pipe] | extra_ier);
2172 
2173 	spin_unlock_irq(&display->irq.lock);
2174 }
2175 
2176 void gen8_irq_power_well_pre_disable(struct intel_display *display,
2177 				     u8 pipe_mask)
2178 {
2179 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2180 	enum pipe pipe;
2181 
2182 	spin_lock_irq(&display->irq.lock);
2183 
2184 	if (!intel_irqs_enabled(dev_priv)) {
2185 		spin_unlock_irq(&display->irq.lock);
2186 		return;
2187 	}
2188 
2189 	for_each_pipe_masked(display, pipe, pipe_mask)
2190 		intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
2191 
2192 	spin_unlock_irq(&display->irq.lock);
2193 
2194 	/* make sure we're done processing display irqs */
2195 	intel_synchronize_irq(dev_priv);
2196 }
2197 
2198 /*
2199  * SDEIER is also touched by the interrupt handler to work around missed PCH
2200  * interrupts. Hence we can't update it after the interrupt handler is enabled;
2201  * instead we unconditionally enable all PCH interrupt sources here, but then
2202  * only unmask them as needed with SDEIMR.
2203  *
2204  * Note that we currently do this after installing the interrupt handler,
2205  * but before we enable the master interrupt. That should be sufficient
2206  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
2207  * interrupts could still race.
2208  */
2209 static void ibx_irq_postinstall(struct intel_display *display)
2210 {
2211 	u32 mask;
2212 
2213 	if (HAS_PCH_NOP(display))
2214 		return;
2215 
2216 	if (HAS_PCH_IBX(display))
2217 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
2218 	else if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
2219 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
2220 	else
2221 		mask = SDE_GMBUS_CPT;
2222 
2223 	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
2224 }
2225 
2226 void valleyview_enable_display_irqs(struct intel_display *display)
2227 {
2228 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2229 
2230 	spin_lock_irq(&display->irq.lock);
2231 
2232 	if (display->irq.vlv_display_irqs_enabled)
2233 		goto out;
2234 
2235 	display->irq.vlv_display_irqs_enabled = true;
2236 
2237 	if (intel_irqs_enabled(dev_priv)) {
2238 		_vlv_display_irq_reset(display);
2239 		_vlv_display_irq_postinstall(display);
2240 	}
2241 
2242 out:
2243 	spin_unlock_irq(&display->irq.lock);
2244 }
2245 
2246 void valleyview_disable_display_irqs(struct intel_display *display)
2247 {
2248 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2249 
2250 	spin_lock_irq(&display->irq.lock);
2251 
2252 	if (!display->irq.vlv_display_irqs_enabled)
2253 		goto out;
2254 
2255 	display->irq.vlv_display_irqs_enabled = false;
2256 
2257 	if (intel_irqs_enabled(dev_priv))
2258 		_vlv_display_irq_reset(display);
2259 out:
2260 	spin_unlock_irq(&display->irq.lock);
2261 }
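
/*
 * The enable/disable pair above is assumed to bracket display power well
 * state: the irqs are only reset and reprogrammed while the device-level
 * irq machinery is live (intel_irqs_enabled()), and the
 * vlv_display_irqs_enabled flag makes both entry points idempotent.
 */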
2262 
2263 void ilk_de_irq_postinstall(struct intel_display *display)
2264 {
2265 	u32 display_mask, extra_mask;
2266 
2267 	if (DISPLAY_VER(display) >= 7) {
2268 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2269 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
2270 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2271 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
2272 			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
2273 			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
2274 			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
2275 			      DE_DP_A_HOTPLUG_IVB);
2276 	} else {
2277 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE |
2278 				DE_PCH_EVENT | DE_GTT_FAULT |
2279 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
2280 				DE_PIPEA_CRC_DONE | DE_POISON);
2281 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
2282 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
2283 			      DE_PLANE_FLIP_DONE(PLANE_A) |
2284 			      DE_PLANE_FLIP_DONE(PLANE_B) |
2285 			      DE_DP_A_HOTPLUG);
2286 	}
2287 
2288 	if (display->platform.haswell) {
2289 		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
2290 		display_mask |= DE_EDP_PSR_INT_HSW;
2291 	}
2292 
2293 	if (display->platform.ironlake && display->platform.mobile)
2294 		extra_mask |= DE_PCU_EVENT;
2295 
2296 	display->irq.ilk_de_imr_mask = ~display_mask;
2297 
2298 	ibx_irq_postinstall(display);
2299 
2300 	intel_display_irq_regs_init(display, DE_IRQ_REGS, display->irq.ilk_de_imr_mask,
2301 				    display_mask | extra_mask);
2302 }
2303 
2304 static void mtp_irq_postinstall(struct intel_display *display);
2305 static void icp_irq_postinstall(struct intel_display *display);
2306 
2307 void gen8_de_irq_postinstall(struct intel_display *display)
2308 {
2309 	u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) |
2310 		GEN8_PIPE_CDCLK_CRC_DONE;
2311 	u32 de_pipe_enables;
2312 	u32 de_port_masked = gen8_de_port_aux_mask(display);
2313 	u32 de_port_enables;
2314 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
2315 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
2316 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
2317 	enum pipe pipe;
2318 
2319 	if (!HAS_DISPLAY(display))
2320 		return;
2321 
2322 	if (DISPLAY_VER(display) >= 14)
2323 		mtp_irq_postinstall(display);
2324 	else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
2325 		icp_irq_postinstall(display);
2326 	else if (HAS_PCH_SPLIT(display))
2327 		ibx_irq_postinstall(display);
2328 
2329 	if (DISPLAY_VER(display) < 11)
2330 		de_misc_masked |= GEN8_DE_MISC_GSE;
2331 
2332 	if (display->platform.geminilake || display->platform.broxton)
2333 		de_port_masked |= BXT_DE_PORT_GMBUS;
2334 
2335 	if (DISPLAY_VER(display) >= 14) {
2336 		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
2337 				  XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
2338 	} else if (DISPLAY_VER(display) >= 11) {
2339 		enum port port;
2340 
2341 		if (intel_bios_is_dsi_present(display, &port))
2342 			de_port_masked |= DSI0_TE | DSI1_TE;
2343 	}
2344 
2345 	if (HAS_DBUF_OVERLAP_DETECTION(display))
2346 		de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;
2347 
2348 	if (HAS_DSB(display))
2349 		de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
2350 			GEN12_DSB_INT(INTEL_DSB_1) |
2351 			GEN12_DSB_INT(INTEL_DSB_2);
2352 
2353 	/* TODO: figure out PIPEDMC interrupts for pre-LNL */
2354 	if (DISPLAY_VER(display) >= 20)
2355 		de_pipe_masked |= GEN12_PIPEDMC_INTERRUPT;
2356 
2357 	de_pipe_enables = de_pipe_masked |
2358 		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
2359 		gen8_de_pipe_flip_done_mask(display);
2360 
2361 	de_port_enables = de_port_masked;
2362 	if (display->platform.geminilake || display->platform.broxton)
2363 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
2364 	else if (display->platform.broadwell)
2365 		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
2366 
2367 	if (DISPLAY_VER(display) >= 12) {
2368 		enum transcoder trans;
2369 
2370 		for_each_cpu_transcoder_masked(display, trans, trans_mask) {
2371 			enum intel_display_power_domain domain;
2372 
2373 			domain = POWER_DOMAIN_TRANSCODER(trans);
2374 			if (!intel_display_power_is_enabled(display, domain))
2375 				continue;
2376 
2377 			intel_display_irq_regs_assert_irr_is_zero(display,
2378 								  TRANS_PSR_IIR(display, trans));
2379 		}
2380 	} else {
2381 		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
2382 	}
2383 
2384 	for_each_pipe(display, pipe) {
2385 		display->irq.de_pipe_imr_mask[pipe] = ~de_pipe_masked;
2386 
2387 		if (intel_display_power_is_enabled(display,
2388 						   POWER_DOMAIN_PIPE(pipe)))
2389 			intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
2390 						    display->irq.de_pipe_imr_mask[pipe],
2391 						    de_pipe_enables);
2392 	}
2393 
2394 	intel_display_irq_regs_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked,
2395 				    de_port_enables);
2396 	intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
2397 				    de_misc_masked);
2398 
2399 	if (IS_DISPLAY_VER(display, 11, 13)) {
2400 		u32 de_hpd_masked = 0;
2401 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
2402 				     GEN11_DE_TBT_HOTPLUG_MASK;
2403 
2404 		intel_display_irq_regs_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
2405 					    de_hpd_enables);
2406 	}
2407 }
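
/*
 * Note the masked/enables split above: de_pipe_enables additionally
 * carries the vblank, FIFO underrun and flip-done bits, so those sources
 * are enabled in IER up front but stay masked in IMR until someone
 * unmasks them on demand, e.g. the vblank path:
 *
 *	bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
 */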
2408 
2409 static void mtp_irq_postinstall(struct intel_display *display)
2410 {
2411 	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
2412 	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
2413 	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
2414 			     XELPDP_TBT_HOTPLUG_MASK;
2415 
2416 	intel_display_irq_regs_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
2417 				    de_hpd_enables);
2418 
2419 	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
2420 }
2421 
2422 static void icp_irq_postinstall(struct intel_display *display)
2423 {
2424 	u32 mask = SDE_GMBUS_ICP;
2425 
2426 	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
2427 }
2428 
2429 void gen11_de_irq_postinstall(struct intel_display *display)
2430 {
2431 	if (!HAS_DISPLAY(display))
2432 		return;
2433 
2434 	gen8_de_irq_postinstall(display);
2435 
2436 	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
2437 }
2438 
2439 void dg1_de_irq_postinstall(struct intel_display *display)
2440 {
2441 	if (!HAS_DISPLAY(display))
2442 		return;
2443 
2444 	gen8_de_irq_postinstall(display);
2445 	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
2446 }
2447 
2448 void intel_display_irq_init(struct intel_display *display)
2449 {
2450 	spin_lock_init(&display->irq.lock);
2451 
2452 	display->drm->vblank_disable_immediate = true;
2453 
2454 	intel_hotplug_irq_init(display);
2455 
2456 	INIT_WORK(&display->irq.vblank_notify_work,
2457 		  intel_display_vblank_notify_work);
2458 }
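
/*
 * Note: vblank_disable_immediate lets DRM turn the vblank interrupt off
 * as soon as the last reference is dropped instead of waiting for the
 * disable timer; that is safe here on the assumption that the hardware
 * frame counter and vblank timestamps stay accurate while the interrupt
 * is off.
 */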
2459 
2460 struct intel_display_irq_snapshot {
2461 	u32 derrmr;
2462 };
2463 
2464 struct intel_display_irq_snapshot *
2465 intel_display_irq_snapshot_capture(struct intel_display *display)
2466 {
2467 	struct intel_display_irq_snapshot *snapshot;
2468 
2469 	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
2470 	if (!snapshot)
2471 		return NULL;
2472 
2473 	if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display))
2474 		snapshot->derrmr = intel_de_read(display, DERRMR);
2475 
2476 	return snapshot;
2477 }
2478 
2479 void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot,
2480 				      struct drm_printer *p)
2481 {
2482 	if (!snapshot)
2483 		return;
2484 
2485 	drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr);
2486 }
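
/*
 * Usage sketch of the snapshot pair (simplified, hypothetical caller):
 *
 *	struct intel_display_irq_snapshot *s;
 *
 *	s = intel_display_irq_snapshot_capture(display);
 *	...
 *	intel_display_irq_snapshot_print(s, &p);	// p: a drm_printer
 *	kfree(s);
 *
 * capture uses GFP_ATOMIC so it can run from atomic context, e.g. an
 * error capture path; print tolerates a NULL snapshot, so a failed
 * allocation degrades to simply omitting the DERRMR line.
 */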
2487