xref: /linux/drivers/gpu/drm/i915/i915_irq.c (revision 15a1fbdcfb519c2bd291ed01c6c94e0b89537a77)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/circ_buf.h>
32 #include <linux/slab.h>
33 #include <linux/sysrq.h>
34 
35 #include <drm/drm_drv.h>
36 #include <drm/drm_irq.h>
37 #include <drm/i915_drm.h>
38 
39 #include "display/intel_display_types.h"
40 #include "display/intel_fifo_underrun.h"
41 #include "display/intel_hotplug.h"
42 #include "display/intel_lpe_audio.h"
43 #include "display/intel_psr.h"
44 
45 #include "gt/intel_gt.h"
46 #include "gt/intel_gt_irq.h"
47 #include "gt/intel_gt_pm_irq.h"
48 #include "gt/intel_rps.h"
49 
50 #include "i915_drv.h"
51 #include "i915_irq.h"
52 #include "i915_trace.h"
53 #include "intel_pm.h"
54 
55 /**
56  * DOC: interrupt handling
57  *
58  * These functions provide the basic support for enabling and disabling the
59  * interrupt handling support. There's a lot more functionality in i915_irq.c
60  * and related files, but that will be described in separate chapters.
61  */
62 
/*
 * Per-platform hook that classifies a hotplug pulse on @pin as "long",
 * given the raw hardware status bits in @val. NOTE(review): callers are
 * not visible in this chunk — presumably the HPD IRQ handlers.
 */
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
64 
/* ILK: HPD pin -> DE interrupt bit (only DP A is in the DE register) */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};
68 
/* IVB: same as ILK but the DP A hotplug bit moved (IVB encoding) */
static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};
72 
/* BDW+: DP A hotplug bit in the GEN8 DE port interrupt register */
static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};
76 
/* IBX PCH: HPD pin -> south display engine (SDE) hotplug bit */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};
84 
/* CPT PCH: as IBX but with the CPT bit encodings */
static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};
92 
/* SPT PCH: adds ports A and E; B-D reuse the CPT bits */
static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};
100 
/* GMCH: HPD pin -> hotplug interrupt *enable* bit (PORT_HOTPLUG_EN) */
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};
109 
/* G4X: HPD pin -> hotplug *status* bit (SDVO status uses G4X encoding) */
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};
118 
/* i915: HPD pin -> hotplug *status* bit (pre-G4X SDVO encoding) */
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};
127 
/* BXT: HPD pin -> DE port register DDI hotplug bit */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
};
134 
/* gen11: pins C-F map to TC1-4; each pin ORs the TC and TBT alias bits */
static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
};
141 
/* gen12: pins D-I map to TC1-6, again with TBT alias bits ORed in */
static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
};
150 
/* ICP PCH: DDI A/B plus Type-C ports 1-4 */
static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};
159 
/* TGP PCH: DDI A-C plus Type-C ports 1-6 */
static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
	[HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
171 
/*
 * gen3_irq_reset - quiesce a gen3-style interrupt bank
 * @uncore: MMIO accessor
 * @imr: interrupt mask register
 * @iir: interrupt identity (latched status) register
 * @ier: interrupt enable register
 *
 * Masks all sources, disables the bank, then clears latched events.
 * Posting reads after each write flush it to hardware before continuing.
 */
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}
186 
/*
 * gen2_irq_reset - quiesce the (single, 16-bit) gen2 interrupt bank
 * @uncore: MMIO accessor
 *
 * Same sequence as gen3_irq_reset() but using the fixed GEN2_IMR/IER/IIR
 * registers and 16-bit accessors.
 */
void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
200 
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	/*
	 * A non-zero IIR here means an event leaked through while interrupts
	 * were supposed to be masked; warn and clear it (twice, since IIR can
	 * queue up two events).
	 */
	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}
219 
/* 16-bit GEN2_IIR counterpart of gen3_assert_iir_is_zero(). */
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	/* Warn about the stale event, then clear it (twice; IIR is double
	 * buffered). */
	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
235 
/*
 * gen3_irq_init - arm a gen3-style interrupt bank
 * @uncore: MMIO accessor
 * @imr/@imr_val: mask register and value to program
 * @ier/@ier_val: enable register and value to program
 * @iir: identity register, checked (not written) to be clean first
 *
 * IER is written before IMR so no event fires until unmasked; the final
 * posting read flushes the unmask to hardware.
 */
void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}
247 
/* 16-bit gen2 counterpart of gen3_irq_init(): check IIR, then IER, IMR. */
void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
257 
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	/* Caller must hold irq_lock; @bits must be a subset of @mask. */
	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	/* Read-modify-write of PORT_HOTPLUG_EN: clear @mask, set @bits. */
	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}
274 
/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
295 
296 /**
297  * ilk_update_display_irq - update DEIMR
298  * @dev_priv: driver private
299  * @interrupt_mask: mask of interrupt bits to update
300  * @enabled_irq_mask: mask of interrupt bits to enable
301  */
302 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
303 			    u32 interrupt_mask,
304 			    u32 enabled_irq_mask)
305 {
306 	u32 new_val;
307 
308 	lockdep_assert_held(&dev_priv->irq_lock);
309 
310 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
311 
312 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
313 		return;
314 
315 	new_val = dev_priv->irq_mask;
316 	new_val &= ~interrupt_mask;
317 	new_val |= (~enabled_irq_mask & interrupt_mask);
318 
319 	if (new_val != dev_priv->irq_mask) {
320 		dev_priv->irq_mask = new_val;
321 		I915_WRITE(DEIMR, dev_priv->irq_mask);
322 		POSTING_READ(DEIMR);
323 	}
324 }
325 
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Read-modify-write of GEN8_DE_PORT_IMR; a cleared IMR bit means the
 * interrupt is enabled. Caller must hold irq_lock.
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Enabled bits must be a subset of the bits being updated. */
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	/* No shadow copy of this IMR is kept, so read it back from hw. */
	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Only touch the register if something actually changed. */
	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}
357 
/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Updates the cached per-pipe IMR value (a cleared bit means enabled)
 * and writes GEN8_DE_PIPE_IMR only on change. Caller must hold irq_lock.
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Enabled bits must be a subset of the bits being updated. */
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}
389 
390 /**
391  * ibx_display_interrupt_update - update SDEIMR
392  * @dev_priv: driver private
393  * @interrupt_mask: mask of interrupt bits to update
394  * @enabled_irq_mask: mask of interrupt bits to enable
395  */
396 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
397 				  u32 interrupt_mask,
398 				  u32 enabled_irq_mask)
399 {
400 	u32 sdeimr = I915_READ(SDEIMR);
401 	sdeimr &= ~interrupt_mask;
402 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
403 
404 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
405 
406 	lockdep_assert_held(&dev_priv->irq_lock);
407 
408 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
409 		return;
410 
411 	I915_WRITE(SDEIMR, sdeimr);
412 	POSTING_READ(SDEIMR);
413 }
414 
/*
 * i915_pipestat_enable_mask - derive PIPESTAT enable bits from status bits
 * @dev_priv: driver private
 * @pipe: pipe whose cached status mask to translate
 *
 * On most platforms the enable bit simply sits 16 above the status bit;
 * VLV/CHV (gen >= 5 path here) need a few exceptions. Returns the enable
 * mask, or 0 if an unsupported status bit was requested. Caller must hold
 * irq_lock.
 */
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Pre-gen5: the simple <<16 relationship always holds. */
	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* These enable bits don't follow the <<16 rule; fix them up. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
458 
/*
 * i915_enable_pipestat - enable PIPESTAT status interrupts on @pipe
 * @status_mask: status bits to start reporting
 *
 * Adds @status_mask to the cached per-pipe mask and reprograms PIPESTAT.
 * Caller must hold irq_lock. No-op if the bits are already enabled.
 */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	/* Writing the status bits also acks any pending events. */
	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
481 
/*
 * i915_disable_pipestat - disable PIPESTAT status interrupts on @pipe
 * @status_mask: status bits to stop reporting
 *
 * Mirror of i915_enable_pipestat(): removes @status_mask from the cached
 * per-pipe mask and reprograms PIPESTAT. Caller must hold irq_lock.
 * No-op if none of the bits are currently enabled.
 */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	/* Writing the status bits also acks any pending events. */
	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
504 
505 static bool i915_has_asle(struct drm_i915_private *dev_priv)
506 {
507 	if (!dev_priv->opregion.asle)
508 		return false;
509 
510 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
511 }
512 
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 *
 * Enables the legacy backlight (BLC) event on pipe B — and, on gen4+,
 * pipe A as well — so that OpRegion ASLE requests are noticed.
 * No-op when the platform has no usable ASLE (see i915_has_asle()).
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
531 
532 /*
533  * This timing diagram depicts the video signal in and
534  * around the vertical blanking period.
535  *
536  * Assumptions about the fictitious mode used in this example:
537  *  vblank_start >= 3
538  *  vsync_start = vblank_start + 1
539  *  vsync_end = vblank_start + 2
540  *  vtotal = vblank_start + 3
541  *
542  *           start of vblank:
543  *           latch double buffered registers
544  *           increment frame counter (ctg+)
545  *           generate start of vblank interrupt (gen4+)
546  *           |
547  *           |          frame start:
548  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
549  *           |          may be shifted forward 1-3 extra lines via PIPECONF
550  *           |          |
551  *           |          |  start of vsync:
552  *           |          |  generate vsync interrupt
553  *           |          |  |
554  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
555  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
556  * ----va---> <-----------------vb--------------------> <--------va-------------
557  *       |          |       <----vs----->                     |
558  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
559  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
560  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
561  *       |          |                                         |
562  *       last visible pixel                                   first visible pixel
563  *                  |                                         increment frame counter (gen3/4)
564  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
565  *
566  * x  = horizontal active
567  * _  = horizontal blanking
568  * hs = horizontal sync
569  * va = vertical active
570  * vb = vertical blanking
571  * vs = vertical sync
572  * vbs = vblank_start (number)
573  *
574  * Summary:
575  * - most events happen at the start of horizontal sync
576  * - frame start happens at the start of horizontal blank, 1-4 lines
577  *   (depending on PIPECONF settings) after the start of vblank
578  * - gen3/4 pixel and frame counter are synchronized with the start
579  *   of horizontal active on the first line of vertical active
580  */
581 
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 *
 * Returns a 24-bit software vblank counter cooked up from the hardware
 * frame counter (which increments at start of active) and the pixel
 * counter, so that it effectively increments at vblank start.
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/* uncore.lock protects the _fw (non-forcewake) register reads. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
650 
/* G4X+: the hardware frame counter register can be used directly. */
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
658 
/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	/*
	 * Elapsed time since vblank start, in timestamp ticks, converted
	 * to scanlines: ticks * pixclock(kHz) / (1000 * pixels-per-line).
	 * NOTE(review): assumes the timestamp counter runs at 1 MHz —
	 * confirm against the register documentation.
	 */
	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	/* Rebase from "lines since vblank start" to an absolute scanline. */
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
711 
/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 *
 * Returns the current scanline (adjusted by crtc->scanline_offset),
 * or -1 if the crtc is inactive. Caller holds uncore.lock.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	/* Some encoder/platform combos need the timestamp-based fallback. */
	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	/* The DSL line-count field is narrower on gen2. */
	if (IS_GEN(dev_priv, 2))
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squak at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
774 
/*
 * i915_get_crtc_scanoutpos - query the current scanout position
 * @_crtc: crtc to query
 * @in_vblank_irq: true when called from the vblank irq (unused here)
 * @vpos/@hpos: out: position relative to vblank end (negative in vblank)
 * @stime/@etime: out (optional): system timestamps bracketing the query
 * @mode: hardware display mode the pipe is running
 *
 * vblank helper callback; returns false if the pipe looks disabled.
 * The register reads between the two timestamps are timing critical,
 * hence the raw (_fw) accessors under uncore.lock.
 */
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	/* gen2 and gen5+ (and G4X) have no usable pixel counter. */
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	/* Interlaced: work in field lines rather than frame lines. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		/* Split the pixel count back into line/column. */
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
894 
/*
 * drm vblank timestamp hook: defer to the generic helper, plugging in
 * our scanout-position callback.
 */
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}
902 
/*
 * Locked wrapper around __intel_get_crtc_scanline() for external callers;
 * takes uncore.lock which the raw register reads require.
 */
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
915 
916 /**
917  * ivb_parity_work - Workqueue called when a parity error interrupt
918  * occurred.
919  * @work: workqueue struct
920  *
921  * Doesn't actually do anything except notify userspace. As a consequence of
922  * this event, userspace should try to remap the bad rows since statistically
923  * it is likely the same row is more likely to go bad again.
924  */
925 static void ivb_parity_work(struct work_struct *work)
926 {
927 	struct drm_i915_private *dev_priv =
928 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
929 	struct intel_gt *gt = &dev_priv->gt;
930 	u32 error_status, row, bank, subbank;
931 	char *parity_event[6];
932 	u32 misccpctl;
933 	u8 slice = 0;
934 
935 	/* We must turn off DOP level clock gating to access the L3 registers.
936 	 * In order to prevent a get/put style interface, acquire struct mutex
937 	 * any time we access those registers.
938 	 */
939 	mutex_lock(&dev_priv->drm.struct_mutex);
940 
941 	/* If we've screwed up tracking, just let the interrupt fire again */
942 	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
943 		goto out;
944 
945 	misccpctl = I915_READ(GEN7_MISCCPCTL);
946 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
947 	POSTING_READ(GEN7_MISCCPCTL);
948 
949 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
950 		i915_reg_t reg;
951 
952 		slice--;
953 		if (drm_WARN_ON_ONCE(&dev_priv->drm,
954 				     slice >= NUM_L3_SLICES(dev_priv)))
955 			break;
956 
957 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
958 
959 		reg = GEN7_L3CDERRST1(slice);
960 
961 		error_status = I915_READ(reg);
962 		row = GEN7_PARITY_ERROR_ROW(error_status);
963 		bank = GEN7_PARITY_ERROR_BANK(error_status);
964 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
965 
966 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
967 		POSTING_READ(reg);
968 
969 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
970 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
971 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
972 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
973 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
974 		parity_event[5] = NULL;
975 
976 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
977 				   KOBJ_CHANGE, parity_event);
978 
979 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
980 			  slice, row, bank, subbank);
981 
982 		kfree(parity_event[4]);
983 		kfree(parity_event[3]);
984 		kfree(parity_event[2]);
985 		kfree(parity_event[1]);
986 	}
987 
988 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
989 
990 out:
991 	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
992 	spin_lock_irq(&gt->irq_lock);
993 	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
994 	spin_unlock_irq(&gt->irq_lock);
995 
996 	mutex_unlock(&dev_priv->drm.struct_mutex);
997 }
998 
999 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1000 {
1001 	switch (pin) {
1002 	case HPD_PORT_C:
1003 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1004 	case HPD_PORT_D:
1005 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1006 	case HPD_PORT_E:
1007 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1008 	case HPD_PORT_F:
1009 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1010 	default:
1011 		return false;
1012 	}
1013 }
1014 
1015 static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1016 {
1017 	switch (pin) {
1018 	case HPD_PORT_D:
1019 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1020 	case HPD_PORT_E:
1021 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1022 	case HPD_PORT_F:
1023 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1024 	case HPD_PORT_G:
1025 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1026 	case HPD_PORT_H:
1027 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
1028 	case HPD_PORT_I:
1029 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
1030 	default:
1031 		return false;
1032 	}
1033 }
1034 
1035 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1036 {
1037 	switch (pin) {
1038 	case HPD_PORT_A:
1039 		return val & PORTA_HOTPLUG_LONG_DETECT;
1040 	case HPD_PORT_B:
1041 		return val & PORTB_HOTPLUG_LONG_DETECT;
1042 	case HPD_PORT_C:
1043 		return val & PORTC_HOTPLUG_LONG_DETECT;
1044 	default:
1045 		return false;
1046 	}
1047 }
1048 
1049 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1050 {
1051 	switch (pin) {
1052 	case HPD_PORT_A:
1053 		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
1054 	case HPD_PORT_B:
1055 		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
1056 	case HPD_PORT_C:
1057 		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
1058 	default:
1059 		return false;
1060 	}
1061 }
1062 
1063 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1064 {
1065 	switch (pin) {
1066 	case HPD_PORT_C:
1067 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1068 	case HPD_PORT_D:
1069 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1070 	case HPD_PORT_E:
1071 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1072 	case HPD_PORT_F:
1073 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1074 	default:
1075 		return false;
1076 	}
1077 }
1078 
1079 static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1080 {
1081 	switch (pin) {
1082 	case HPD_PORT_D:
1083 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1084 	case HPD_PORT_E:
1085 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1086 	case HPD_PORT_F:
1087 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1088 	case HPD_PORT_G:
1089 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1090 	case HPD_PORT_H:
1091 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
1092 	case HPD_PORT_I:
1093 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
1094 	default:
1095 		return false;
1096 	}
1097 }
1098 
1099 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1100 {
1101 	switch (pin) {
1102 	case HPD_PORT_E:
1103 		return val & PORTE_HOTPLUG_LONG_DETECT;
1104 	default:
1105 		return false;
1106 	}
1107 }
1108 
1109 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1110 {
1111 	switch (pin) {
1112 	case HPD_PORT_A:
1113 		return val & PORTA_HOTPLUG_LONG_DETECT;
1114 	case HPD_PORT_B:
1115 		return val & PORTB_HOTPLUG_LONG_DETECT;
1116 	case HPD_PORT_C:
1117 		return val & PORTC_HOTPLUG_LONG_DETECT;
1118 	case HPD_PORT_D:
1119 		return val & PORTD_HOTPLUG_LONG_DETECT;
1120 	default:
1121 		return false;
1122 	}
1123 }
1124 
1125 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1126 {
1127 	switch (pin) {
1128 	case HPD_PORT_A:
1129 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1130 	default:
1131 		return false;
1132 	}
1133 }
1134 
1135 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1136 {
1137 	switch (pin) {
1138 	case HPD_PORT_B:
1139 		return val & PORTB_HOTPLUG_LONG_DETECT;
1140 	case HPD_PORT_C:
1141 		return val & PORTC_HOTPLUG_LONG_DETECT;
1142 	case HPD_PORT_D:
1143 		return val & PORTD_HOTPLUG_LONG_DETECT;
1144 	default:
1145 		return false;
1146 	}
1147 }
1148 
1149 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1150 {
1151 	switch (pin) {
1152 	case HPD_PORT_B:
1153 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1154 	case HPD_PORT_C:
1155 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1156 	case HPD_PORT_D:
1157 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1158 	default:
1159 		return false;
1160 	}
1161 }
1162 
1163 /*
1164  * Get a bit mask of pins that have triggered, and which ones may be long.
1165  * This can be called multiple times with the same masks to accumulate
1166  * hotplug detection results from several registers.
1167  *
1168  * Note that the caller is expected to zero out the masks initially.
1169  */
1170 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1171 			       u32 *pin_mask, u32 *long_mask,
1172 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1173 			       const u32 hpd[HPD_NUM_PINS],
1174 			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1175 {
1176 	enum hpd_pin pin;
1177 
1178 	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1179 
1180 	for_each_hpd_pin(pin) {
1181 		if ((hpd[pin] & hotplug_trigger) == 0)
1182 			continue;
1183 
1184 		*pin_mask |= BIT(pin);
1185 
1186 		if (long_pulse_detect(pin, dig_hotplug_reg))
1187 			*long_mask |= BIT(pin);
1188 	}
1189 
1190 	drm_dbg(&dev_priv->drm,
1191 		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1192 		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1193 
1194 }
1195 
/* Wake up everyone waiting for a GMBUS transfer to complete. */
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1200 
/*
 * DP AUX completions wake the same wait queue as GMBUS; AUX waiters
 * sleep on gmbus_wait_queue, hence the identical wakeup.
 */
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1205 
#if defined(CONFIG_DEBUG_FS)
/*
 * Record one frame's worth of pipe CRC results (up to five values) for
 * the debugfs CRC interface, skipping the initial unreliable readouts.
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
/* Without debugfs there is no CRC consumer; compile to a no-op. */
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif
1248 
1249 
/* HSW+ exposes a single CRC result register per pipe; pad the rest with 0. */
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}
1257 
/* IVB provides five CRC result registers per pipe; forward all of them. */
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
1268 
1269 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1270 				      enum pipe pipe)
1271 {
1272 	u32 res1, res2;
1273 
1274 	if (INTEL_GEN(dev_priv) >= 3)
1275 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1276 	else
1277 		res1 = 0;
1278 
1279 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1280 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1281 	else
1282 		res2 = 0;
1283 
1284 	display_pipe_crc_irq_handler(dev_priv, pipe,
1285 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1286 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1287 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1288 				     res1, res2);
1289 }
1290 
/*
 * Clear all latched PIPESTAT status bits (including the FIFO underrun
 * bit) on every pipe and reset the software enable-mask bookkeeping.
 */
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}
1303 
/*
 * Read and acknowledge the PIPESTAT registers for all pipes, returning
 * the status bits of interest in @pipe_stats. The caller invokes this
 * even when the per-pipe IIR bits are clear, since some status bits
 * (e.g. FIFO underrun) don't raise interrupts at all.
 */
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	/* Nothing to ack while display interrupts are disabled. */
	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filterered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		/* Map the pipe to its event bit in the (VLV-style) IIR. */
		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}
1369 
1370 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1371 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1372 {
1373 	enum pipe pipe;
1374 
1375 	for_each_pipe(dev_priv, pipe) {
1376 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1377 			drm_handle_vblank(&dev_priv->drm, pipe);
1378 
1379 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1380 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1381 
1382 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1383 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1384 	}
1385 }
1386 
1387 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1388 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1389 {
1390 	bool blc_event = false;
1391 	enum pipe pipe;
1392 
1393 	for_each_pipe(dev_priv, pipe) {
1394 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1395 			drm_handle_vblank(&dev_priv->drm, pipe);
1396 
1397 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1398 			blc_event = true;
1399 
1400 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1401 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1402 
1403 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1404 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1405 	}
1406 
1407 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1408 		intel_opregion_asle_intr(dev_priv);
1409 }
1410 
1411 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1412 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1413 {
1414 	bool blc_event = false;
1415 	enum pipe pipe;
1416 
1417 	for_each_pipe(dev_priv, pipe) {
1418 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1419 			drm_handle_vblank(&dev_priv->drm, pipe);
1420 
1421 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1422 			blc_event = true;
1423 
1424 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1425 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1426 
1427 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1428 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1429 	}
1430 
1431 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1432 		intel_opregion_asle_intr(dev_priv);
1433 
1434 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1435 		gmbus_irq_handler(dev_priv);
1436 }
1437 
1438 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1439 					    u32 pipe_stats[I915_MAX_PIPES])
1440 {
1441 	enum pipe pipe;
1442 
1443 	for_each_pipe(dev_priv, pipe) {
1444 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1445 			drm_handle_vblank(&dev_priv->drm, pipe);
1446 
1447 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1448 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1449 
1450 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1451 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1452 	}
1453 
1454 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1455 		gmbus_irq_handler(dev_priv);
1456 }
1457 
/*
 * Read and clear PORT_HOTPLUG_STAT, accumulating all hotplug/AUX status
 * bits seen. Retries a bounded number of times because clearing can race
 * with new events; returns the union of everything observed.
 */
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	/* g4x/VLV/CHV additionally latch DP AUX completion in this register. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}
1495 
1496 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1497 				 u32 hotplug_status)
1498 {
1499 	u32 pin_mask = 0, long_mask = 0;
1500 
1501 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1502 	    IS_CHERRYVIEW(dev_priv)) {
1503 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1504 
1505 		if (hotplug_trigger) {
1506 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1507 					   hotplug_trigger, hotplug_trigger,
1508 					   hpd_status_g4x,
1509 					   i9xx_port_hotplug_long_detect);
1510 
1511 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1512 		}
1513 
1514 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1515 			dp_aux_irq_handler(dev_priv);
1516 	} else {
1517 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1518 
1519 		if (hotplug_trigger) {
1520 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1521 					   hotplug_trigger, hotplug_trigger,
1522 					   hpd_status_i915,
1523 					   i9xx_port_hotplug_long_detect);
1524 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1525 		}
1526 	}
1527 }
1528 
/*
 * Top-level interrupt handler for VLV. Acks GT, PM, display pipe and
 * hotplug sources; the IER/master-enable disable/re-enable sequence
 * around the ack is required for edge-triggered IIR behavior (see the
 * in-line theory comment).
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* Single pass; 'break' bails out early when nothing is pending. */
	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Re-enable interrupts before dispatching the slow handlers. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
1613 
/*
 * Top-level interrupt handler for CHV. Like the VLV handler, but the
 * master control lives in GEN8_MASTER_IRQ and GT interrupts are routed
 * through the gen8 GT IRQ path.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* Single pass; 'break' bails out early when nothing is pending. */
	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Re-enable interrupts before dispatching the slow handlers. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
1690 
/*
 * Ack and decode PCH hotplug events on IBX/CPT-style south displays.
 * Note the register write is performed even with a zero trigger, purely
 * to ack the interrupt (see the in-line comment).
 */
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		/* Avoid clearing status bits we are not actually handling. */
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
1722 
1723 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1724 {
1725 	enum pipe pipe;
1726 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1727 
1728 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1729 
1730 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1731 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1732 			       SDE_AUDIO_POWER_SHIFT);
1733 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1734 			port_name(port));
1735 	}
1736 
1737 	if (pch_iir & SDE_AUX_MASK)
1738 		dp_aux_irq_handler(dev_priv);
1739 
1740 	if (pch_iir & SDE_GMBUS)
1741 		gmbus_irq_handler(dev_priv);
1742 
1743 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1744 		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1745 
1746 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1747 		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1748 
1749 	if (pch_iir & SDE_POISON)
1750 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1751 
1752 	if (pch_iir & SDE_FDI_MASK)
1753 		for_each_pipe(dev_priv, pipe)
1754 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1755 				pipe_name(pipe),
1756 				I915_READ(FDI_RX_IIR(pipe)));
1757 
1758 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1759 		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1760 
1761 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1762 		drm_dbg(&dev_priv->drm,
1763 			"PCH transcoder CRC error interrupt\n");
1764 
1765 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1766 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1767 
1768 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1769 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1770 }
1771 
/*
 * Handle GEN7_ERR_INT: poison, per-pipe FIFO underrun and CRC-done
 * events. The latched value is written back at the end to ack it.
 */
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			/* HSW+ uses a single CRC register, IVB has five. */
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	/* Write back the handled bits to ack them. */
	I915_WRITE(GEN7_ERR_INT, err_int);
}
1794 
/*
 * Handle the CPT south error interrupt (SERR_INT): PCH poison and
 * per-transcoder FIFO underruns. The latched value is written back to
 * ack it.
 */
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	/* Write back the handled bits to ack them. */
	I915_WRITE(SERR_INT, serr_int);
}
1809 
/*
 * CPT south display interrupt handler: hotplug, DP AUX, GMBUS, audio,
 * FDI and south error (SERR) events. Mirrors ibx_irq_handler() with
 * the CPT bit layout.
 */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
1845 
1846 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1847 {
1848 	u32 ddi_hotplug_trigger, tc_hotplug_trigger;
1849 	u32 pin_mask = 0, long_mask = 0;
1850 	bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
1851 	const u32 *pins;
1852 
1853 	if (HAS_PCH_TGP(dev_priv)) {
1854 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1855 		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
1856 		tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
1857 		pins = hpd_tgp;
1858 	} else if (HAS_PCH_JSP(dev_priv)) {
1859 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1860 		tc_hotplug_trigger = 0;
1861 		pins = hpd_tgp;
1862 	} else if (HAS_PCH_MCC(dev_priv)) {
1863 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1864 		tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
1865 		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
1866 		pins = hpd_icp;
1867 	} else {
1868 		drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
1869 			 "Unrecognized PCH type 0x%x\n",
1870 			 INTEL_PCH_TYPE(dev_priv));
1871 
1872 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1873 		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
1874 		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
1875 		pins = hpd_icp;
1876 	}
1877 
1878 	if (ddi_hotplug_trigger) {
1879 		u32 dig_hotplug_reg;
1880 
1881 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
1882 		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1883 
1884 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1885 				   ddi_hotplug_trigger,
1886 				   dig_hotplug_reg, pins,
1887 				   icp_ddi_port_hotplug_long_detect);
1888 	}
1889 
1890 	if (tc_hotplug_trigger) {
1891 		u32 dig_hotplug_reg;
1892 
1893 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
1894 		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
1895 
1896 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1897 				   tc_hotplug_trigger,
1898 				   dig_hotplug_reg, pins,
1899 				   tc_port_hotplug_long_detect);
1900 	}
1901 
1902 	if (pin_mask)
1903 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1904 
1905 	if (pch_iir & SDE_GMBUS_ICP)
1906 		gmbus_irq_handler(dev_priv);
1907 }
1908 
/*
 * SPT/KBP PCH (south display engine) interrupt handler: decode hotplug
 * triggers from SDEIIR and GMBUS events. Port E has its own trigger bit
 * and a separate hotplug control register (PCH_PORT_HOTPLUG2), so it is
 * handled separately from ports A-D.
 */
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	/* Ports A-D triggers, with port E masked out (handled below) */
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Latch the hotplug status and ack it by writing it back */
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		/* Same latch-and-ack dance for port E's dedicated register */
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	/* Both port groups feed into a single combined HPD dispatch */
	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}
1944 
/*
 * CPU-side (north) digital port hotplug handler for ILK-style platforms:
 * latch and ack the hotplug control register, then translate the trigger
 * bits into HPD pin and long-pulse masks for the generic HPD code.
 */
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/* Latch the hotplug status and ack it by writing it back */
	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
1960 
/*
 * Display interrupt handler for ILK/SNB (gen5/6): dispatches the DEIIR
 * bits already read and cleared by the caller (ilk_irq_handler).
 */
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	/* Graphics System Event: forwarded to the OpRegion/ASLE code */
	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	/* Per-pipe events: vblank, FIFO underrun, CRC completion */
	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	/* Ironlake-only: PCU events drive the GPU frequency (RPS) logic */
	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&dev_priv->gt.rps);
}
2006 
/*
 * Display interrupt handler for IVB/HSW (gen7): same role as
 * ilk_display_irq_handler() but with the gen7 DEIIR bit layout.
 */
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	/* PSR interrupts (HSW): ack EDP_PSR_IIR after handling */
	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	/* Graphics System Event: forwarded to the OpRegion/ASLE code */
	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(&dev_priv->drm, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
2047 
2048 /*
2049  * To handle irqs with the minimum potential races with fresh interrupts, we:
2050  * 1 - Disable Master Interrupt Control.
2051  * 2 - Find the source(s) of the interrupt.
2052  * 3 - Clear the Interrupt Identity bits (IIR).
2053  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2054  * 5 - Re-enable Master Interrupt Control.
2055  */
2056 static irqreturn_t ilk_irq_handler(int irq, void *arg)
2057 {
2058 	struct drm_i915_private *dev_priv = arg;
2059 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2060 	irqreturn_t ret = IRQ_NONE;
2061 
2062 	if (!intel_irqs_enabled(dev_priv))
2063 		return IRQ_NONE;
2064 
2065 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2066 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2067 
2068 	/* disable master interrupt before clearing iir  */
2069 	de_ier = I915_READ(DEIER);
2070 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2071 
2072 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2073 	 * interrupts will will be stored on its back queue, and then we'll be
2074 	 * able to process them after we restore SDEIER (as soon as we restore
2075 	 * it, we'll get an interrupt if SDEIIR still has something to process
2076 	 * due to its back queue). */
2077 	if (!HAS_PCH_NOP(dev_priv)) {
2078 		sde_ier = I915_READ(SDEIER);
2079 		I915_WRITE(SDEIER, 0);
2080 	}
2081 
2082 	/* Find, clear, then process each source of interrupt */
2083 
2084 	gt_iir = I915_READ(GTIIR);
2085 	if (gt_iir) {
2086 		I915_WRITE(GTIIR, gt_iir);
2087 		ret = IRQ_HANDLED;
2088 		if (INTEL_GEN(dev_priv) >= 6)
2089 			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
2090 		else
2091 			gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
2092 	}
2093 
2094 	de_iir = I915_READ(DEIIR);
2095 	if (de_iir) {
2096 		I915_WRITE(DEIIR, de_iir);
2097 		ret = IRQ_HANDLED;
2098 		if (INTEL_GEN(dev_priv) >= 7)
2099 			ivb_display_irq_handler(dev_priv, de_iir);
2100 		else
2101 			ilk_display_irq_handler(dev_priv, de_iir);
2102 	}
2103 
2104 	if (INTEL_GEN(dev_priv) >= 6) {
2105 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2106 		if (pm_iir) {
2107 			I915_WRITE(GEN6_PMIIR, pm_iir);
2108 			ret = IRQ_HANDLED;
2109 			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
2110 		}
2111 	}
2112 
2113 	I915_WRITE(DEIER, de_ier);
2114 	if (!HAS_PCH_NOP(dev_priv))
2115 		I915_WRITE(SDEIER, sde_ier);
2116 
2117 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2118 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2119 
2120 	return ret;
2121 }
2122 
/*
 * BXT/GLK digital port hotplug handler: latch and ack PCH_PORT_HOTPLUG,
 * then translate the trigger bits into HPD pin and long-pulse masks.
 */
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/* Latch the hotplug status and ack it by writing it back */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2138 
/*
 * Gen11+ north display hotplug handler: Type-C and Thunderbolt ports each
 * have their own trigger mask and hotplug control register. The hpd table
 * and long-pulse detector differ between gen11 (ICL) and gen12+ (TGL).
 */
static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
	long_pulse_detect_func long_pulse_detect;
	const u32 *hpd;

	if (INTEL_GEN(dev_priv) >= 12) {
		long_pulse_detect = gen12_port_hotplug_long_detect;
		hpd = hpd_gen12;
	} else {
		long_pulse_detect = gen11_port_hotplug_long_detect;
		hpd = hpd_gen11;
	}

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		/* Latch the TC hotplug status and ack it by writing it back */
		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
				   dig_hotplug_reg, hpd, long_pulse_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		/* Same for the Thunderbolt hotplug register */
		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
				   dig_hotplug_reg, hpd, long_pulse_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		drm_err(&dev_priv->drm,
			"Unexpected DE HPD interrupt 0x%08x\n", iir);
}
2181 
2182 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2183 {
2184 	u32 mask;
2185 
2186 	if (INTEL_GEN(dev_priv) >= 12)
2187 		return TGL_DE_PORT_AUX_DDIA |
2188 			TGL_DE_PORT_AUX_DDIB |
2189 			TGL_DE_PORT_AUX_DDIC |
2190 			TGL_DE_PORT_AUX_USBC1 |
2191 			TGL_DE_PORT_AUX_USBC2 |
2192 			TGL_DE_PORT_AUX_USBC3 |
2193 			TGL_DE_PORT_AUX_USBC4 |
2194 			TGL_DE_PORT_AUX_USBC5 |
2195 			TGL_DE_PORT_AUX_USBC6;
2196 
2197 
2198 	mask = GEN8_AUX_CHANNEL_A;
2199 	if (INTEL_GEN(dev_priv) >= 9)
2200 		mask |= GEN9_AUX_CHANNEL_B |
2201 			GEN9_AUX_CHANNEL_C |
2202 			GEN9_AUX_CHANNEL_D;
2203 
2204 	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2205 		mask |= CNL_AUX_CHANNEL_F;
2206 
2207 	if (IS_GEN(dev_priv, 11))
2208 		mask |= ICL_AUX_CHANNEL_E;
2209 
2210 	return mask;
2211 }
2212 
2213 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2214 {
2215 	if (INTEL_GEN(dev_priv) >= 11)
2216 		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2217 	else if (INTEL_GEN(dev_priv) >= 9)
2218 		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2219 	else
2220 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2221 }
2222 
/*
 * Handle the DE_MISC interrupt group (gen8+): OpRegion/ASLE events and
 * PSR interrupts. Complains if none of the expected bits were set.
 */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		u32 psr_iir;
		i915_reg_t iir_reg;

		/* Gen12 moved the PSR IIR to a per-transcoder register */
		if (INTEL_GEN(dev_priv) >= 12)
			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
		else
			iir_reg = EDP_PSR_IIR;

		/* Read and ack the PSR IIR before handling it */
		psr_iir = I915_READ(iir_reg);
		I915_WRITE(iir_reg, psr_iir);

		if (psr_iir)
			found = true;

		intel_psr_irq_handler(dev_priv, psr_iir);
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}
2254 
/*
 * Gen8+ display engine interrupt handler. Dispatches every display-related
 * sub-source indicated by master_ctl: misc (GSE/PSR), gen11+ HPD, DE port
 * (AUX, hotplug, GMBUS), per-pipe events, and PCH (south) interrupts.
 * Each sub-source IIR is read, acked by write-back, then processed.
 */
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			/* Hotplug lives in DE port on BXT/GLK and BDW */
			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			/* GMBUS is routed through DE port on gen9 LP only */
			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
		}
		else
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err(&dev_priv->drm,
				"Fault errors on pipe %c: 0x%08x\n",
				pipe_name(pipe),
				fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			/* Dispatch by PCH generation, newest first */
			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}
2393 
/*
 * Disable the gen8 master interrupt control and return the current
 * top-level interrupt indications.
 */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
2406 
/* Re-enable the gen8 master interrupt control. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
2411 
/*
 * Top-level gen8 interrupt handler: disable the master interrupt, handle
 * GT interrupts, then display interrupts (under an RPM wakeref-assert
 * exclusion), and finally re-enable the master interrupt.
 */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		/* Not ours: re-arm and bail */
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	return IRQ_HANDLED;
}
2441 
/*
 * Read and ack the GU_MISC IIR if the master control indicates it fired.
 * Returns the (acked) IIR value, or 0 if the GU_MISC bit wasn't set.
 */
static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}
2457 
/* Handle an already-acked GU_MISC IIR: only the GSE (ASLE) bit is used. */
static void
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(gt->i915);
}
2464 
/*
 * Disable the gen11 master interrupt control and return the current
 * top-level interrupt indications.
 */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
2477 
/* Re-enable the gen11 master interrupt control. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
2482 
/*
 * Gen11 display interrupt path: read and clear GEN11_DISPLAY_INT_CTL,
 * dispatch into the shared gen8 DE handler, then re-enable. Runs with
 * RPM wakeref asserts disabled since IRQs are synced in runtime_suspend.
 */
static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
2501 
/*
 * Common gen11+ top-level interrupt handler, parameterized over the
 * master-interrupt disable/enable callbacks so platform variants can
 * share this body while the compiler inlines the callbacks.
 */
static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
		    u32 (*intr_disable)(void __iomem * const regs),
		    void (*intr_enable)(void __iomem * const regs))
{
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = intr_disable(regs);
	if (!master_ctl) {
		/* Not ours: re-arm and bail */
		intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	/* Ack GU_MISC before re-enabling master, handle it after */
	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	intr_enable(regs);

	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	return IRQ_HANDLED;
}
2536 
/* Gen11 IRQ entry point: the common handler with gen11 master callbacks. */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   gen11_master_intr_disable,
				   gen11_master_intr_enable);
}
2543 
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	/* Unmask the vblank pipestat bit under the irq lock */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2559 
/*
 * i915GM vblank enable: same as i8xx, plus a workaround that disables
 * render clock gating while any vblank interrupt is enabled.
 */
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}
2575 
/*
 * i965 vblank enable: uses the start-of-vblank pipestat bit rather than
 * the i8xx frame-start bit.
 */
int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2589 
/*
 * ILK-style (gen5-7) vblank enable via the display engine IMR; the
 * vblank bit layout differs between gen5/6 and gen7 (IVB).
 */
int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}
2610 
/* Gen8+ vblank enable via the per-pipe DE interrupt mask. */
int bdw_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}
2629 
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	/* Mask the vblank pipestat bit under the irq lock */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2643 
/*
 * i915GM vblank disable: undo the render clock gating workaround once the
 * last vblank interrupt user is gone (see i915gm_enable_vblank()).
 */
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	if (--dev_priv->vblank_enabled == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
2653 
/* i965 vblank disable: mask the start-of-vblank pipestat bit. */
void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2665 
/* ILK-style (gen5-7) vblank disable; counterpart of ilk_enable_vblank(). */
void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2678 
/* Gen8+ vblank disable; counterpart of bdw_enable_vblank(). */
void bdw_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2689 
/*
 * Reset the south display engine (PCH) interrupt registers. No-op on
 * machines with no PCH interrupt support (PCH_NOP).
 */
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	/* CPT/LPT also latch south errors in SERR_INT: clear them all */
	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}
2702 
2703 /*
2704  * SDEIER is also touched by the interrupt handler to work around missed PCH
2705  * interrupts. Hence we can't update it after the interrupt handler is enabled -
2706  * instead we unconditionally enable all PCH interrupt sources here, but then
2707  * only unmask them as needed with SDEIMR.
2708  *
2709  * This function needs to be called before interrupts are enabled.
2710  */
2711 static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
2712 {
2713 	if (HAS_PCH_NOP(dev_priv))
2714 		return;
2715 
2716 	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
2717 	I915_WRITE(SDEIER, 0xffffffff);
2718 	POSTING_READ(SDEIER);
2719 }
2720 
/*
 * Reset all VLV/CHV display interrupt state: GT/display fault status,
 * hotplug, pipestats and the main VLV interrupt registers. Caller holds
 * the irq lock (see valleyview_irq_reset()/cherryview_irq_reset()).
 */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);

	/* Disable all hotplug detection and clear latched hotplug status */
	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	/* ~0u == everything masked; checked again in postinstall */
	dev_priv->irq_mask = ~0u;
}
2738 
/*
 * Program the VLV/CHV display interrupt enables after reset: per-pipe
 * pipestat sources plus the top-level pipe-event / LPE audio interrupts
 * (CHV adds pipe C). Caller holds the irq lock.
 */
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* GMBUS is reported through pipe A's pipestat on these platforms */
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* Reset should have left everything masked (see vlv_display_irq_reset) */
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
2769 
/* drm_dma.h hooks
*/
/*
 * Full interrupt reset for ILK-style (gen5-7) platforms: display engine,
 * error/PSR status where present, GT, and the PCH.
 */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	if (IS_GEN(dev_priv, 7))
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	/* HSW is the only ILK-style platform with PSR registers */
	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(&dev_priv->gt);

	ibx_irq_reset(dev_priv);
}
2789 
/*
 * Full interrupt reset for VLV: master disable, GT reset, then the
 * display side (only if display IRQs were enabled) under the irq lock.
 */
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
2802 
/*
 * Full interrupt reset for gen8/9: master disable, GT, PSR, the per-pipe
 * DE registers (only for pipes whose power well is up), the remaining DE
 * groups, PCU, and the PCH where present.
 */
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(&dev_priv->gt);

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	/* Pipes whose power well is down can't be touched */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}
2827 
/*
 * Reset all gen11+ display interrupt registers: display master control,
 * PSR (per-transcoder on gen12+, powered-on transcoders only), per-pipe
 * DE registers for powered-on pipes, the DE port/misc/HPD groups, and
 * the PCH on ICP+ machines.
 */
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		/* Gen12 moved the PSR registers to per-transcoder instances */
		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	/* Pipes whose power well is down can't be touched */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}
2865 
/*
 * Full interrupt reset for gen11+: master disable, then GT, display,
 * GU_MISC and PCU register groups.
 */
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}
2878 
/*
 * Re-program the per-pipe DE interrupt registers for the pipes in
 * pipe_mask after their power well has been turned on (the register
 * contents are lost while the well is off). No-op if driver interrupts
 * are currently disabled.
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}
2901 
/*
 * Reset the per-pipe DE interrupt registers for the pipes in pipe_mask
 * before their power well is turned off, and wait for any in-flight
 * display interrupt processing to finish. No-op if driver interrupts
 * are currently disabled.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}
2923 
/*
 * Full interrupt reset for CHV: gen8-style master/GT/PCU reset combined
 * with the VLV display reset path (only if display IRQs were enabled).
 */
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(&dev_priv->gt);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
2940 
2941 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
2942 				  const u32 hpd[HPD_NUM_PINS])
2943 {
2944 	struct intel_encoder *encoder;
2945 	u32 enabled_irqs = 0;
2946 
2947 	for_each_intel_encoder(&dev_priv->drm, encoder)
2948 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
2949 			enabled_irqs |= hpd[encoder->hpd_pin];
2950 
2951 	return enabled_irqs;
2952 }
2953 
/*
 * Program the PCH hotplug detection control register for IBX/CPT/LPT:
 * enable digital hotplug on ports B-D and set the DP short-pulse
 * duration, plus port A on LPT-LP packages.
 */
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
2978 
2979 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
2980 {
2981 	u32 hotplug_irqs, enabled_irqs;
2982 
2983 	if (HAS_PCH_IBX(dev_priv)) {
2984 		hotplug_irqs = SDE_HOTPLUG_MASK;
2985 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
2986 	} else {
2987 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2988 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
2989 	}
2990 
2991 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2992 
2993 	ibx_hpd_detection_setup(dev_priv);
2994 }
2995 
/*
 * Enable hotplug detection in the ICP+ south hotplug control registers:
 * set the given enable bits for the combo (DDI) ports and, if any are
 * requested, for the Type-C ports.
 */
static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
				    u32 ddi_hotplug_enable_mask,
				    u32 tc_hotplug_enable_mask)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ddi_hotplug_enable_mask;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	/* Skip the TC register entirely when no TC ports are requested */
	if (tc_hotplug_enable_mask) {
		hotplug = I915_READ(SHOTPLUG_CTL_TC);
		hotplug |= tc_hotplug_enable_mask;
		I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
	}
}
3012 
/*
 * Common ICP+ south hotplug interrupt setup: program the HPD filter
 * duration, unmask the requested DDI/TC hotplug bits in SDEIMR, then
 * enable detection in the hotplug control registers. The masks and pin
 * table are supplied by the per-PCH callers (mcc/jsp/gen11).
 */
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
			      u32 sde_ddi_mask, u32 sde_tc_mask,
			      u32 ddi_enable_mask, u32 tc_enable_mask,
			      const u32 *pins)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = sde_ddi_mask | sde_tc_mask;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);

	I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
}
3029 
3030 /*
3031  * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
3032  * equivalent of SDE.
3033  */
3034 static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
3035 {
3036 	icp_hpd_irq_setup(dev_priv,
3037 			  SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
3038 			  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
3039 			  hpd_icp);
3040 }
3041 
3042 /*
3043  * JSP behaves exactly the same as MCC above except that port C is mapped to
3044  * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
3045  * masks & tables rather than ICP's masks & tables.
3046  */
3047 static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3048 {
3049 	icp_hpd_irq_setup(dev_priv,
3050 			  SDE_DDI_MASK_TGP, 0,
3051 			  TGP_DDI_HPD_ENABLE_MASK, 0,
3052 			  hpd_tgp);
3053 }
3054 
3055 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3056 {
3057 	u32 hotplug;
3058 
3059 	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3060 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3061 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3062 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3063 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3064 	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3065 
3066 	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3067 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3068 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3069 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3070 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3071 	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3072 }
3073 
/*
 * Set up hotplug interrupts for gen11+: unmask the TC/TBT bits in the
 * north display HPD IMR, enable detection, then delegate the south (PCH)
 * side to the ICP/TGP helper depending on the PCH type.
 */
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	const u32 *hpd;
	u32 val;

	/* gen12 uses a different hpd_pin -> irq bit table than gen11 */
	hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
				  TGP_DDI_HPD_ENABLE_MASK,
				  TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
				  ICP_DDI_HPD_ENABLE_MASK,
				  ICP_TC_HPD_ENABLE_MASK, hpd_icp);
}
3100 
/*
 * Enable PCH hotplug detection for SPT/KBP/CNP: apply the CNP chassis
 * clock-request workaround, then enable digital hotplug on ports A-D and
 * port E (which lives in the second hotplug register).
 */
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* Port E has its enable bit in the second hotplug register */
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
3125 
/*
 * Set up PCH hotplug interrupts for SPT+: program the HPD filter count
 * on CNP+, unmask the SPT hotplug bits in SDEIMR, then enable detection.
 */
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}
3140 
/*
 * Enable CPU-side (north) digital port A hotplug detection on ILK+.
 */
static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
3156 
/*
 * Set up hotplug interrupts for ILK..BDW: unmask the CPU-side port A
 * hotplug bit in the appropriate display IMR for the generation, enable
 * CPU-side detection, then set up the PCH (south) side too.
 */
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}
3182 
/*
 * Enable hotplug detection on BXT/GLK ports A-C, and set the per-port
 * HPD invert bits according to the VBT for the ports whose irqs are in
 * @enabled_irqs (the invert polarity depends on the board's AOB design).
 */
static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	drm_dbg_kms(&dev_priv->drm,
		    "Invert bit setting: hp_ctl:%x hp_port:%x\n",
		    hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
3214 
/* Enable BXT hotplug detection for all ports, regardless of HPD state */
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}
3219 
/*
 * Set up hotplug interrupts for BXT/GLK: unmask the enabled port bits in
 * the DE port IMR, then program detection/invert only for those ports.
 */
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}
3231 
/*
 * Enable the always-on south display (PCH) interrupts (GMBUS, AUX, etc.)
 * in SDEIMR and program hotplug detection for the PCH generation. Does
 * nothing on PCH_NOP systems.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	/* SDEIIR must be clear before unmasking, or we'd miss edges */
	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}
3255 
/*
 * Post-install interrupt setup for ILK..HSW: build the generation's
 * display interrupt masks, initialize the DE IMR/IER, bring up the GT
 * interrupts, and program CPU + PCH hotplug. VBLANK/underrun bits are
 * put only in @extra_mask (enabled in IER but masked in IMR) so they
 * latch without being delivered until explicitly unmasked.
 */
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		/* HSW routes PSR interrupts through the DE block */
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev_priv);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}
3305 
3306 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3307 {
3308 	lockdep_assert_held(&dev_priv->irq_lock);
3309 
3310 	if (dev_priv->display_irqs_enabled)
3311 		return;
3312 
3313 	dev_priv->display_irqs_enabled = true;
3314 
3315 	if (intel_irqs_enabled(dev_priv)) {
3316 		vlv_display_irq_reset(dev_priv);
3317 		vlv_display_irq_postinstall(dev_priv);
3318 	}
3319 }
3320 
3321 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3322 {
3323 	lockdep_assert_held(&dev_priv->irq_lock);
3324 
3325 	if (!dev_priv->display_irqs_enabled)
3326 		return;
3327 
3328 	dev_priv->display_irqs_enabled = false;
3329 
3330 	if (intel_irqs_enabled(dev_priv))
3331 		vlv_display_irq_reset(dev_priv);
3332 }
3333 
3334 
3335 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3336 {
3337 	gen5_gt_irq_postinstall(&dev_priv->gt);
3338 
3339 	spin_lock_irq(&dev_priv->irq_lock);
3340 	if (dev_priv->display_irqs_enabled)
3341 		vlv_display_irq_postinstall(dev_priv);
3342 	spin_unlock_irq(&dev_priv->irq_lock);
3343 
3344 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3345 	POSTING_READ(VLV_MASTER_IER);
3346 }
3347 
/*
 * Program all gen8+ display engine (DE) interrupt registers: per-pipe,
 * DE port, DE misc, and (gen11+) the DE HPD bank. Builds the masks
 * incrementally per generation, verifies the PSR IIRs are clear, and
 * skips pipes whose power wells are currently off (those get programmed
 * later by gen8_irq_power_well_post_enable()).
 */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	/* GSE moved out of the misc bank after gen10 */
	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	/* VBLANK/underrun are enabled in IER but stay masked in IMR */
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		/* gen12 has per-transcoder PSR IIRs; check only powered ones */
		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}
3430 
/*
 * Post-install interrupt setup for gen8..gen10: program the PCH, GT and
 * display engine interrupt banks, then enable the master interrupt last.
 */
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}
3444 
/*
 * Post-install south display (PCH) interrupt setup for ICP+: enable all
 * SDE interrupts in IER, unmask GMBUS in IMR, and enable hotplug
 * detection with the masks appropriate for the PCH variant.
 */
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask = SDE_GMBUS_ICP;

	/* SDEIER is expected to still be zero from the preceding reset */
	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_TGP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
					TGP_TC_HPD_ENABLE_MASK);
	else if (HAS_PCH_JSP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
	else if (HAS_PCH_MCC(dev_priv))
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE(PORT_TC1));
	else
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE_MASK);
}
3468 
/*
 * Post-install interrupt setup for gen11+: program the PCH (if ICP+),
 * GT, display engine and GU misc banks, then enable the display and
 * master interrupts last.
 */
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	gen11_master_intr_enable(uncore->regs);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);
}
3487 
/*
 * Post-install interrupt setup for CHV: bring up the gen8-style GT
 * interrupts, program the VLV-style display interrupts if the display
 * power domain currently allows it, then enable the master interrupt.
 */
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}
3500 
/* Interrupt reset for gen2: clear pipestats and the GEN2 IMR/IER/IIR. */
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
}
3509 
/*
 * Post-install interrupt setup for gen2: program the error mask,
 * initialize the 16-bit IMR/IER, and enable CRC-done pipestat events.
 */
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	/* Only report page-table and memory-refresh errors */
	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3541 
/*
 * Acknowledge a gen2 master error interrupt: read and clear EIR,
 * returning the original value in @eir and any bits that refused to
 * clear in @eir_stuck; stuck bits are then masked in EMR so they stop
 * re-raising the master error interrupt.
 */
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	/* Whatever is still set after the write-to-clear is stuck */
	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
3571 
3572 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3573 				   u16 eir, u16 eir_stuck)
3574 {
3575 	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
3576 
3577 	if (eir_stuck)
3578 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3579 			eir_stuck);
3580 }
3581 
/*
 * Acknowledge a gen3+ master error interrupt: read and clear EIR,
 * returning the original value in @eir and any bits that refused to
 * clear in @eir_stuck; stuck bits are then masked in EMR so they stop
 * re-raising the master error interrupt. 32-bit variant of
 * i8xx_error_irq_ack().
 */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	/* Whatever is still set after the write-to-clear is stuck */
	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}
3609 
3610 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3611 				   u32 eir, u32 eir_stuck)
3612 {
3613 	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
3614 
3615 	if (eir_stuck)
3616 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3617 			eir_stuck);
3618 }
3619 
/*
 * Top-level interrupt handler for gen2: ack pipestat and error status,
 * clear IIR, then dispatch user, error and pipestat events. Note the
 * loop runs exactly once (do/while(0)); the break on iir == 0 handles
 * shared-line spurious interrupts.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
3664 
/*
 * Interrupt reset for gen3/gen4-era i915: disable and ack hotplug (if
 * present), clear pipestats, and reset the GEN2-layout IMR/IER/IIR.
 */
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* Hotplug status bits are write-to-clear */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}
3678 
/*
 * Post-install interrupt setup for gen3: program the error mask,
 * initialize IMR/IER (adding the display port interrupt when the
 * platform has hotplug), enable CRC-done pipestat events, and the ASLE
 * pipestat.
 */
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	/* Only report page-table and memory-refresh errors */
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
3719 
/*
 * Top-level interrupt handler for gen3: ack hotplug, pipestat and error
 * status, clear IIR, then dispatch user, error, hotplug and pipestat
 * events. The do/while(0) loop runs once; the break on iir == 0 handles
 * shared-line spurious interrupts.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
3772 
/*
 * Interrupt reset for gen4 (i965/g4x): disable and ack hotplug, clear
 * pipestats, and reset the GEN2-layout IMR/IER/IIR.
 */
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/* Hotplug status bits are write-to-clear */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}
3784 
/*
 * Post-install interrupt setup for gen4 (i965/g4x): program the error
 * mask (with extra G4x error sources), initialize IMR/IER (adding the
 * BSD ring interrupt on G4x), enable the GMBUS and CRC-done pipestat
 * events, and the ASLE pipestat.
 */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
3837 
/*
 * Program hotplug detection for pre-gen5 (i915/i965/g4x): build the
 * enable bits from the currently-enabled HPD pins and set the CRT
 * detection parameters. Caller must hold dev_priv->irq_lock.
 */
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
3862 
/*
 * Top-level interrupt handler for gen4 (i965/g4x): ack hotplug,
 * pipestat and error status, clear IIR, then dispatch user (RCS and,
 * on g4x, BSD), error, hotplug and pipestat events. The do/while(0)
 * loop runs once; the break on iir == 0 handles shared-line spurious
 * interrupts.
 */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
3917 
3918 /**
3919  * intel_irq_init - initializes irq support
3920  * @dev_priv: i915 device instance
3921  *
3922  * This function initializes all the irq support including work items, timers
3923  * and all the vtables. It does not setup the interrupt itself though.
3924  */
3925 void intel_irq_init(struct drm_i915_private *dev_priv)
3926 {
3927 	struct drm_device *dev = &dev_priv->drm;
3928 	int i;
3929 
3930 	intel_hpd_init_work(dev_priv);
3931 
3932 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
3933 	for (i = 0; i < MAX_L3_SLICES; ++i)
3934 		dev_priv->l3_parity.remap_info[i] = NULL;
3935 
3936 	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
3937 	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
3938 		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
3939 
3940 	dev->vblank_disable_immediate = true;
3941 
3942 	/* Most platforms treat the display irq block as an always-on
3943 	 * power domain. vlv/chv can disable it at runtime and need
3944 	 * special care to avoid writing any of the display block registers
3945 	 * outside of the power domain. We defer setting up the display irqs
3946 	 * in this case to the runtime pm.
3947 	 */
3948 	dev_priv->display_irqs_enabled = true;
3949 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3950 		dev_priv->display_irqs_enabled = false;
3951 
3952 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
3953 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
3954 	 * detection, as short HPD storms will occur as a natural part of
3955 	 * sideband messaging with MST.
3956 	 * On older platforms however, IRQ storms can occur with both long and
3957 	 * short pulses, as seen on some G4x systems.
3958 	 */
3959 	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
3960 
3961 	if (HAS_GMCH(dev_priv)) {
3962 		if (I915_HAS_HOTPLUG(dev_priv))
3963 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3964 	} else {
3965 		if (HAS_PCH_JSP(dev_priv))
3966 			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
3967 		else if (HAS_PCH_MCC(dev_priv))
3968 			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
3969 		else if (INTEL_GEN(dev_priv) >= 11)
3970 			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
3971 		else if (IS_GEN9_LP(dev_priv))
3972 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
3973 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
3974 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
3975 		else
3976 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
3977 	}
3978 }
3979 
3980 /**
3981  * intel_irq_fini - deinitializes IRQ support
3982  * @i915: i915 device instance
3983  *
3984  * This function deinitializes all the IRQ support.
3985  */
3986 void intel_irq_fini(struct drm_i915_private *i915)
3987 {
3988 	int i;
3989 
3990 	for (i = 0; i < MAX_L3_SLICES; ++i)
3991 		kfree(i915->l3_parity.remap_info[i]);
3992 }
3993 
3994 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
3995 {
3996 	if (HAS_GMCH(dev_priv)) {
3997 		if (IS_CHERRYVIEW(dev_priv))
3998 			return cherryview_irq_handler;
3999 		else if (IS_VALLEYVIEW(dev_priv))
4000 			return valleyview_irq_handler;
4001 		else if (IS_GEN(dev_priv, 4))
4002 			return i965_irq_handler;
4003 		else if (IS_GEN(dev_priv, 3))
4004 			return i915_irq_handler;
4005 		else
4006 			return i8xx_irq_handler;
4007 	} else {
4008 		if (INTEL_GEN(dev_priv) >= 11)
4009 			return gen11_irq_handler;
4010 		else if (INTEL_GEN(dev_priv) >= 8)
4011 			return gen8_irq_handler;
4012 		else
4013 			return ilk_irq_handler;
4014 	}
4015 }
4016 
/* Mask and ack all interrupts via the platform-appropriate reset hook. */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_reset(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_reset(dev_priv);
	else
		i8xx_irq_reset(dev_priv);
}
4039 
/* Enable interrupt sources via the platform-appropriate postinstall hook. */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_postinstall(dev_priv);
	else
		i8xx_irq_postinstall(dev_priv);
}
4062 
4063 /**
4064  * intel_irq_install - enables the hardware interrupt
4065  * @dev_priv: i915 device instance
4066  *
4067  * This function enables the hardware interrupt handling, but leaves the hotplug
4068  * handling still disabled. It is called after intel_irq_init().
4069  *
4070  * In the driver load and resume code we need working interrupts in a few places
4071  * but don't want to deal with the hassle of concurrent probe and hotplug
4072  * workers. Hence the split into this two-stage approach.
4073  */
4074 int intel_irq_install(struct drm_i915_private *dev_priv)
4075 {
4076 	int irq = dev_priv->drm.pdev->irq;
4077 	int ret;
4078 
4079 	/*
4080 	 * We enable some interrupt sources in our postinstall hooks, so mark
4081 	 * interrupts as enabled _before_ actually enabling them to avoid
4082 	 * special cases in our ordering checks.
4083 	 */
4084 	dev_priv->runtime_pm.irqs_enabled = true;
4085 
4086 	dev_priv->drm.irq_enabled = true;
4087 
4088 	intel_irq_reset(dev_priv);
4089 
4090 	ret = request_irq(irq, intel_irq_handler(dev_priv),
4091 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4092 	if (ret < 0) {
4093 		dev_priv->drm.irq_enabled = false;
4094 		return ret;
4095 	}
4096 
4097 	intel_irq_postinstall(dev_priv);
4098 
4099 	return ret;
4100 }
4101 
4102 /**
4103  * intel_irq_uninstall - finilizes all irq handling
4104  * @dev_priv: i915 device instance
4105  *
4106  * This stops interrupt and hotplug handling and unregisters and frees all
4107  * resources acquired in the init functions.
4108  */
4109 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4110 {
4111 	int irq = dev_priv->drm.pdev->irq;
4112 
4113 	/*
4114 	 * FIXME we can get called twice during driver probe
4115 	 * error handling as well as during driver remove due to
4116 	 * intel_modeset_driver_remove() calling us out of sequence.
4117 	 * Would be nice if it didn't do that...
4118 	 */
4119 	if (!dev_priv->drm.irq_enabled)
4120 		return;
4121 
4122 	dev_priv->drm.irq_enabled = false;
4123 
4124 	intel_irq_reset(dev_priv);
4125 
4126 	free_irq(irq, dev_priv);
4127 
4128 	intel_hpd_cancel_work(dev_priv);
4129 	dev_priv->runtime_pm.irqs_enabled = false;
4130 }
4131 
4132 /**
4133  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4134  * @dev_priv: i915 device instance
4135  *
4136  * This function is used to disable interrupts at runtime, both in the runtime
4137  * pm and the system suspend/resume code.
4138  */
4139 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4140 {
4141 	intel_irq_reset(dev_priv);
4142 	dev_priv->runtime_pm.irqs_enabled = false;
4143 	intel_synchronize_irq(dev_priv);
4144 }
4145 
4146 /**
4147  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4148  * @dev_priv: i915 device instance
4149  *
4150  * This function is used to enable interrupts at runtime, both in the runtime
4151  * pm and the system suspend/resume code.
4152  */
4153 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4154 {
4155 	dev_priv->runtime_pm.irqs_enabled = true;
4156 	intel_irq_reset(dev_priv);
4157 	intel_irq_postinstall(dev_priv);
4158 }
4159 
/**
 * intel_irqs_enabled - check whether driver irq handling is enabled
 * @dev_priv: i915 device instance
 *
 * Returns true between intel_irq_install() /
 * intel_runtime_pm_enable_interrupts() and the matching disable/uninstall
 * calls, which toggle runtime_pm.irqs_enabled.
 */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}
4168 
/**
 * intel_synchronize_irq - wait for pending device IRQ handlers to complete
 * @i915: i915 device instance
 *
 * Thin wrapper around synchronize_irq() for the device's interrupt line.
 */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(i915->drm.pdev->irq);
}
4173