// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_irq.h"

#include <linux/sched/clock.h>

#include <drm/drm_managed.h>

#include "display/xe_display.h"
#include "regs/xe_irq_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_hw_error.h"
#include "xe_i2c.h"
#include "xe_memirq.h"
#include "xe_mert.h"
#include "xe_mmio.h"
#include "xe_pxp.h"
#include "xe_sriov.h"
#include "xe_tile.h"

/*
 * Interrupt registers for a unit are always consecutive and ordered
 * ISR, IMR, IIR, IER.
 */
#define IMR(offset)				XE_REG(offset + 0x4)
#define IIR(offset)				XE_REG(offset + 0x8)
#define IER(offset)				XE_REG(offset + 0xc)

static int xe_irq_msix_init(struct xe_device *xe);
static void xe_irq_msix_free(struct xe_device *xe);
static int xe_irq_msix_request_irqs(struct xe_device *xe);
static void xe_irq_msix_synchronize_irq(struct xe_device *xe);

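/*
 * Sanity check used when enabling an interrupt source: warn if the IIR
 * already has bits set and clear it (twice, since the IIR can queue up
 * two events) so that we start from a known-clean state.
 */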
static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 val = xe_mmio_read32(mmio, reg);

	if (val == 0)
		return;

	drm_WARN(&mmio->tile->xe->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 reg.addr, val);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
}

/*
 * Unmask and enable the specified interrupts.  Does not check current state,
 * so any bits not specified here will become masked and disabled.
 */
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
	struct xe_mmio *mmio = &tile->mmio;

	/*
	 * If we're just enabling an interrupt now, it shouldn't already
	 * be raised in the IIR.
	 */
	assert_iir_is_zero(mmio, IIR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), bits);
	xe_mmio_write32(mmio, IMR(irqregs), ~bits);

	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));
}

/* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, IMR(irqregs), ~0);
	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
}

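/*
 * Disable the top-level "graphics master" interrupt and return the pending
 * master control bits so the caller knows which sources need servicing.
 */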
static u32 xelp_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

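/*
 * If the GU MISC source is flagged in the master control, read and clear its
 * IIR; the caller later forwards these bits to xe_display_irq_enable().
 */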
static u32
gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 iir;

	if (!(master_ctl & GU_MISC_IRQ))
		return 0;

	iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
	if (likely(iir))
		xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);

	return iir;
}

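/*
 * Re-enable the top-level "graphics master" interrupt, optionally stalling
 * on a posting read so the write is flushed to the hardware.
 */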
static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
	if (stall)
		xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

/* Enable/unmask the HWE interrupts for a specific GT's engines. */
void xe_irq_enable_hwe(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_mmio *mmio = &gt->mmio;
	u32 common_mask, val, gsc_mask = 0, heci_mask = 0,
	    rcs_mask = 0, bcs_mask = 0, vcs_mask = 0, vecs_mask = 0,
	    ccs_mask = 0;

	if (xe_device_uses_memirq(xe))
		return;

	if (xe_device_uc_enabled(xe)) {
		common_mask = GT_MI_USER_INTERRUPT |
			      GT_FLUSH_COMPLETE_INTERRUPT;

		/* Enable Compute Walker Interrupt for non-MSIX platforms */
		if (GRAPHICS_VERx100(xe) >= 3511 && !xe_device_has_msix(xe)) {
			rcs_mask |= GT_COMPUTE_WALKER_INTERRUPT;
			ccs_mask |= GT_COMPUTE_WALKER_INTERRUPT;
		}
	} else {
		common_mask = GT_MI_USER_INTERRUPT |
			      GT_CS_MASTER_ERROR_INTERRUPT |
			      GT_CONTEXT_SWITCH_INTERRUPT |
			      GT_WAIT_SEMAPHORE_INTERRUPT;
	}

	rcs_mask |= common_mask;
	bcs_mask |= common_mask;
	vcs_mask |= common_mask;
	vecs_mask |= common_mask;
	ccs_mask |= common_mask;

	if (xe_gt_is_main_type(gt)) {
		/*
		 * For enabling the interrupts, the information about fused-off
		 * engines doesn't matter much, but it also lets us check whether
		 * the engine is architecturally available on this platform.
		 */
		u32 ccs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
		u32 bcs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);

		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE,
				REG_FIELD_PREP(ENGINE1_MASK, rcs_mask) |
				REG_FIELD_PREP(ENGINE0_MASK, bcs_mask));
		if (ccs_fuse_mask)
			xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE,
					REG_FIELD_PREP(ENGINE1_MASK, ccs_mask));

		/* Unmask interrupts for each engine instance */
		val = ~REG_FIELD_PREP(ENGINE1_MASK, rcs_mask);
		xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, val);
		val = ~REG_FIELD_PREP(ENGINE1_MASK, bcs_mask);
		xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, val);

		val = ~(REG_FIELD_PREP(ENGINE1_MASK, bcs_mask) |
			REG_FIELD_PREP(ENGINE0_MASK, bcs_mask));
		if (bcs_fuse_mask & (BIT(1)|BIT(2)))
			xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, val);
		if (bcs_fuse_mask & (BIT(3)|BIT(4)))
			xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, val);
		if (bcs_fuse_mask & (BIT(5)|BIT(6)))
			xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, val);
		if (bcs_fuse_mask & (BIT(7)|BIT(8)))
			xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, val);

		val = ~(REG_FIELD_PREP(ENGINE1_MASK, ccs_mask) |
			REG_FIELD_PREP(ENGINE0_MASK, ccs_mask));
		if (ccs_fuse_mask & (BIT(0)|BIT(1)))
			xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, val);
		if (ccs_fuse_mask & (BIT(2)|BIT(3)))
			xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, val);
	}

	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
		u32 vcs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
		u32 vecs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
		u32 other_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER);

		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE,
				REG_FIELD_PREP(ENGINE1_MASK, vcs_mask) |
				REG_FIELD_PREP(ENGINE0_MASK, vecs_mask));

		/* Unmask interrupts for each engine instance */
		val = ~(REG_FIELD_PREP(ENGINE1_MASK, vcs_mask) |
			REG_FIELD_PREP(ENGINE0_MASK, vcs_mask));
		if (vcs_fuse_mask & (BIT(0) | BIT(1)))
			xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, val);
		if (vcs_fuse_mask & (BIT(2) | BIT(3)))
			xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, val);
		if (vcs_fuse_mask & (BIT(4) | BIT(5)))
			xe_mmio_write32(mmio, VCS4_VCS5_INTR_MASK, val);
		if (vcs_fuse_mask & (BIT(6) | BIT(7)))
			xe_mmio_write32(mmio, VCS6_VCS7_INTR_MASK, val);

		val = ~(REG_FIELD_PREP(ENGINE1_MASK, vecs_mask) |
			REG_FIELD_PREP(ENGINE0_MASK, vecs_mask));
		if (vecs_fuse_mask & (BIT(0) | BIT(1)))
			xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, val);
		if (vecs_fuse_mask & (BIT(2) | BIT(3)))
			xe_mmio_write32(mmio, VECS2_VECS3_INTR_MASK, val);

		/*
		 * the heci2 interrupt is enabled via the same register as the
		 * GSCCS interrupts, but it has its own mask register.
		 */
		if (other_fuse_mask) {
			gsc_mask = common_mask | GSC_ER_COMPLETE;
			heci_mask = GSC_IRQ_INTF(1);
		} else if (xe->info.has_heci_gscfi) {
			gsc_mask = GSC_IRQ_INTF(1);
		}

		if (gsc_mask) {
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
		}
		if (heci_mask)
			xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));

		if (xe_pxp_is_supported(xe)) {
			u32 kcr_mask = KCR_PXP_STATE_TERMINATED_INTERRUPT |
				       KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT |
				       KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT;

			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, kcr_mask << 16);
			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~(kcr_mask << 16));
		}
	}
}

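/*
 * Select the given interrupt bit in the bank and poll the identity register
 * until the hardware reports valid data, then ack it and return the raw
 * identity (engine class, instance and interrupt vector).
 */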
static u32
gt_engine_identity(struct xe_device *xe,
		   struct xe_mmio *mmio,
		   const unsigned int bank,
		   const unsigned int bit)
{
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&xe->irq.lock);

	xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
	} while (!(ident & INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & INTR_DATA_VALID))) {
		drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			bank, bit, ident);
		return 0;
	}

	xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);

	return ident;
}

#define   OTHER_MEDIA_GUC_INSTANCE           16

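/*
 * Dispatch OTHER-class interrupts that are not backed by a hw_engine: GuC
 * notifications for the primary and media GT and the GSC proxy interrupt;
 * anything unexpected triggers a one-time warning.
 */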
static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE && xe_gt_is_main_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
		return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);

	if (instance != OTHER_GUC_INSTANCE &&
	    instance != OTHER_MEDIA_GUC_INSTANCE) {
		WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
			  instance, iir);
	}
}

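/*
 * Decide which GT an interrupt belongs to: on platforms with a standalone
 * media GT (media version 13+), video and some OTHER-class instances are
 * routed to the media GT, everything else to the primary GT.
 */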
static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
				    enum xe_engine_class class,
				    unsigned int instance)
{
	struct xe_device *xe = tile_to_xe(tile);

	if (MEDIA_VER(xe) < 13)
		return tile->primary_gt;

	switch (class) {
	case XE_ENGINE_CLASS_VIDEO_DECODE:
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return tile->media_gt;
	case XE_ENGINE_CLASS_OTHER:
		switch (instance) {
		case OTHER_MEDIA_GUC_INSTANCE:
		case OTHER_GSC_INSTANCE:
		case OTHER_GSC_HECI2_INSTANCE:
			return tile->media_gt;
		default:
			break;
		}
		fallthrough;
	default:
		return tile->primary_gt;
	}
}

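/*
 * Walk both GT interrupt banks of a tile: latch the pending bits, resolve
 * each one to an engine identity, ack the bank, then route every event to
 * the matching hw_engine or to the HECI/PXP/"other" handlers.
 */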
static void gt_irq_handler(struct xe_tile *tile,
			   u32 master_ctl, unsigned long *intr_dw,
			   u32 *identity)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mmio *mmio = &tile->mmio;
	unsigned int bank, bit;
	u16 instance, intr_vec;
	enum xe_engine_class class;
	struct xe_hw_engine *hwe;

	spin_lock(&xe->irq.lock);

	for (bank = 0; bank < 2; bank++) {
		if (!(master_ctl & GT_DW_IRQ(bank)))
			continue;

		intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
		for_each_set_bit(bit, intr_dw + bank, 32)
			identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
		xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);

		for_each_set_bit(bit, intr_dw + bank, 32) {
			struct xe_gt *engine_gt;

			class = INTR_ENGINE_CLASS(identity[bit]);
			instance = INTR_ENGINE_INSTANCE(identity[bit]);
			intr_vec = INTR_ENGINE_INTR(identity[bit]);

			engine_gt = pick_engine_gt(tile, class, instance);

			hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
			if (hwe) {
				xe_hw_engine_handle_irq(hwe, intr_vec);
				continue;
			}

			if (class == XE_ENGINE_CLASS_OTHER) {
				/*
				 * HECI GSCFI interrupts come from outside of GT.
				 * KCR irqs come from inside GT but are handled
				 * by the global PXP subsystem.
				 */
				if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
					xe_heci_gsc_irq_handler(xe, intr_vec);
				else if (instance == OTHER_KCR_INSTANCE)
					xe_pxp_irq_handler(xe, intr_vec);
				else
					gt_other_irq_handler(engine_gt, instance, intr_vec);
			}
		}
	}

	spin_unlock(&xe->irq.lock);
}

/*
 * Top-level interrupt handler for Xe_LP platforms (which did not have
 * a "master tile" interrupt register).
 */
static irqreturn_t xelp_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	u32 master_ctl, gu_misc_iir;
	unsigned long intr_dw[2];
	u32 identity[32];

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_ctl = xelp_intr_disable(xe);
	if (!master_ctl) {
		xelp_intr_enable(xe, false);
		return IRQ_NONE;
	}

	gt_irq_handler(tile, master_ctl, intr_dw, identity);

	xe_display_irq_handler(xe, master_ctl);

	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);

	xelp_intr_enable(xe, false);

	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

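/*
 * Disable the DG1+ "master tile" interrupt, then read back and ack the tile
 * indication bits so the caller knows which tiles need servicing.
 */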
static u32 dg1_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 val;

	/* First disable interrupts */
	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);

	return val;
}

static void dg1_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
	if (stall)
		xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
}

/*
 * Top-level interrupt handler for Xe_LP+ and beyond.  These platforms have
 * a "master tile" interrupt register which must be consulted before the
 * "graphics master" interrupt register.
 */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
	unsigned long intr_dw[2];
	u32 identity[32];
	u8 id;

	/* TODO: This really shouldn't be copied+pasted */

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_tile_ctl = dg1_intr_disable(xe);
	if (!master_tile_ctl) {
		dg1_intr_enable(xe, false);
		return IRQ_NONE;
	}

	for_each_tile(tile, xe, id) {
		struct xe_mmio *mmio = &tile->mmio;

		if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
			continue;

		master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);

		/*
		 * We might be in irq handler just when PCIe DPC is initiated
		 * and all MMIO reads will be returned with all 1's. Ignore this
		 * irq as device is inaccessible.
		 */
		if (master_ctl == REG_GENMASK(31, 0)) {
			drm_dbg(&tile_to_xe(tile)->drm,
				"Ignore this IRQ as device might be in DPC containment.\n");
			return IRQ_HANDLED;
		}

		xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);

		gt_irq_handler(tile, master_ctl, intr_dw, identity);
		xe_hw_error_irq_handler(tile, master_ctl);

		/*
		 * Display interrupts (including display backlight operations
		 * that get reported as Gunit GSE) would only be hooked up to
		 * the primary tile.
		 */
		if (id == 0) {
			if (xe->info.has_heci_cscfi)
				xe_heci_csc_irq_handler(xe, master_ctl);
			xe_display_irq_handler(xe, master_ctl);
			xe_i2c_irq_handler(xe, master_ctl);
			xe_mert_irq_handler(xe, master_ctl);
			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
		}
	}

	dg1_intr_enable(xe, false);
	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

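/*
 * Disable and mask every GT interrupt source on this tile: engine classes
 * and instances, GSC/HECI, crypto (KCR), GPM/WGBOXPERF and GuC.
 */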
static void gt_irq_reset(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;
	u32 ccs_mask = ~0;
	u32 bcs_mask = ~0;

	if (tile->primary_gt) {
		ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						       XE_ENGINE_CLASS_COMPUTE);
		bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						       XE_ENGINE_CLASS_COPY);
	}

	/* Disable RCS, BCS, VCS and VECS class engines. */
	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
	if (ccs_mask)
		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);

	/* Restore the IRQ masks on RCS, BCS, VCS and VECS engines. */
	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK,	~0);
	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK,	~0);
	if (bcs_mask & (BIT(1)|BIT(2)))
		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
	if (bcs_mask & (BIT(3)|BIT(4)))
		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
	if (bcs_mask & (BIT(5)|BIT(6)))
		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
	if (bcs_mask & (BIT(7)|BIT(8)))
		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK,	~0);
	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK,	~0);
	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK,	~0);
	if (ccs_mask & (BIT(0)|BIT(1)))
		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(2)|BIT(3)))
		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);

	if ((tile->media_gt &&
	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
	    tile_to_xe(tile)->info.has_heci_gscfi) {
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
		xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~0);
	}

	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK,  ~0);
	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE,	 0);
	xe_mmio_write32(mmio, GUC_SG_INTR_MASK,		~0);
}

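/*
 * Per-tile IRQ reset helpers: disable the top-level interrupt, reset the GT
 * sources and, except for VFs, mask the PCU interrupt bank as well.
 */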
static void xelp_irq_reset(struct xe_tile *tile)
{
	xelp_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset(struct xe_tile *tile)
{
	if (xe_tile_is_root(tile))
		dg1_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset_mstr(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}

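/*
 * VF-specific reset: VFs either rely on memory-based interrupts or, on older
 * graphics IPs, only disable the graphics master before resetting the
 * per-tile sources.
 */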
static void vf_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	xe_assert(xe, IS_SRIOV_VF(xe));

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_disable(xe);
	else
		xe_assert(xe, xe_device_has_memirq(xe));

	for_each_tile(tile, xe, id) {
		if (xe_device_has_memirq(xe))
			xe_memirq_reset(&tile->memirq);
		else
			gt_irq_reset(tile);
	}
}

static void xe_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	u8 id;

	if (IS_SRIOV_VF(xe))
		return vf_irq_reset(xe);

	if (xe_device_uses_memirq(xe)) {
		for_each_tile(tile, xe, id)
			xe_memirq_reset(&tile->memirq);
	}

	for_each_tile(tile, xe, id) {
		if (GRAPHICS_VERx100(xe) >= 1210)
			dg1_irq_reset(tile);
		else
			xelp_irq_reset(tile);
	}

	tile = xe_device_get_root_tile(xe);
	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
	xe_display_irq_reset(xe);
	xe_i2c_irq_reset(xe);

	/*
	 * The tile's top-level status register should be the last one
	 * to be reset to avoid possible bit re-latching from lower
	 * level interrupts.
	 */
	if (GRAPHICS_VERx100(xe) >= 1210) {
		for_each_tile(tile, xe, id)
			dg1_irq_reset_mstr(tile);
	}
}

static void vf_irq_postinstall(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	for_each_tile(tile, xe, id)
		if (xe_device_has_memirq(xe))
			xe_memirq_postinstall(&tile->memirq);

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_enable(xe, true);
	else
		xe_assert(xe, xe_device_has_memirq(xe));
}

static void xe_irq_postinstall(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe))
		return vf_irq_postinstall(xe);

	if (xe_device_uses_memirq(xe)) {
		struct xe_tile *tile;
		unsigned int id;

		for_each_tile(tile, xe, id)
			xe_memirq_postinstall(&tile->memirq);
	}

	xe_display_irq_postinstall(xe);
	xe_i2c_irq_postinstall(xe);

	/*
	 * ASLE backlight operations are reported via GUnit GSE interrupts
	 * on the root tile.
	 */
	unmask_and_enable(xe_device_get_root_tile(xe),
			  GU_MISC_IRQ_OFFSET, GU_MISC_GSE);

	/* Enable top-level interrupts */
	if (GRAPHICS_VERx100(xe) >= 1210)
		dg1_intr_enable(xe, true);
	else
		xelp_intr_enable(xe, true);
}

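/* Interrupt handler used by VFs that get memory-based interrupt delivery. */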
static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	unsigned int id;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, id)
		xe_memirq_handler(&tile->memirq);

	return IRQ_HANDLED;
}

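/* Pick the top-level handler for this device: VF memirq, DG1+ or Xe_LP. */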
static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
		return vf_mem_irq_handler;

	if (GRAPHICS_VERx100(xe) >= 1210)
		return dg1_irq_handler;
	else
		return xelp_irq_handler;
}

static int xe_irq_msi_request_irqs(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	irq_handler_t irq_handler;
	int irq, err;

	irq_handler = xe_irq_handler(xe);
	if (!irq_handler) {
		drm_err(&xe->drm, "No supported interrupt handler");
		return -EINVAL;
	}

	irq = pci_irq_vector(pdev, 0);
	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
		return err;
	}

	return 0;
}

static void xe_irq_msi_free(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;

	irq = pci_irq_vector(pdev, 0);
	free_irq(irq, xe);
}

static void irq_uninstall(void *arg)
{
	struct xe_device *xe = arg;

	if (!atomic_xchg(&xe->irq.enabled, 0))
		return;

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe))
		xe_irq_msix_free(xe);
	else
		xe_irq_msi_free(xe);
}

int xe_irq_init(struct xe_device *xe)
{
	spin_lock_init(&xe->irq.lock);

	return xe_irq_msix_init(xe);
}

int xe_irq_install(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int irq_flags = PCI_IRQ_MSI;
	int nvec = 1;
	int err;

	xe_hw_error_init(xe);

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe)) {
		nvec = xe->irq.msix.nvec;
		irq_flags = PCI_IRQ_MSIX;
	}

	err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
		return err;
	}

	err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
					xe_irq_msi_request_irqs(xe);
	if (err)
		return err;

	atomic_set(&xe->irq.enabled, 1);

	xe_irq_postinstall(xe);

	return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
}

static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
{
	synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
}

void xe_irq_suspend(struct xe_device *xe)
{
	atomic_set(&xe->irq.enabled, 0); /* no new irqs */

	/* flush irqs */
	if (xe_device_has_msix(xe))
		xe_irq_msix_synchronize_irq(xe);
	else
		xe_irq_msi_synchronize_irq(xe);
	xe_irq_reset(xe); /* turn irqs off */
}

void xe_irq_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	/*
	 * lock not needed:
	 * 1. no irq will arrive before the postinstall
	 * 2. display is not yet resumed
	 */
	atomic_set(&xe->irq.enabled, 1);
	xe_irq_reset(xe);
	xe_irq_postinstall(xe); /* turn irqs on */

	for_each_gt(gt, xe, id)
		xe_irq_enable_hwe(gt);
}

/* MSI-X related definitions and functions below. */

enum xe_irq_msix_static {
	GUC2HOST_MSIX = 0,
	DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
	/* Must be last */
	NUM_OF_STATIC_MSIX,
};

static int xe_irq_msix_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int nvec = pci_msix_vec_count(pdev);

	if (nvec == -EINVAL)
		return 0;  /* MSI */

	if (nvec < 0) {
		drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
		return nvec;
	}

	xe->irq.msix.nvec = nvec;
	xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
	return 0;
}

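/*
 * Default MSI-X handler: walk every hw engine of every tile that has a
 * memirq buffer and let xe_memirq_hwe_handler() process its memory-based
 * interrupt status.
 */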
static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
{
	unsigned int tile_id, gt_id;
	struct xe_device *xe = arg;
	struct xe_memirq *memirq;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_tile *tile;
	struct xe_gt *gt;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, tile_id) {
		memirq = &tile->memirq;
		if (!memirq->bo)
			continue;

		for_each_gt(gt, xe, gt_id) {
			if (gt->tile != tile)
				continue;

			for_each_hw_engine(hwe, gt, id)
				xe_memirq_hwe_handler(memirq, hwe);
		}
	}

	return IRQ_HANDLED;
}

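/*
 * Reserve an MSI-X index in the xarray: either any free dynamic index above
 * the static ones, or exactly the index the caller passed in.
 */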
static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
				    bool dynamic_msix, u16 *msix)
{
	struct xa_limit limit;
	int ret;
	u32 id;

	limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
				 XA_LIMIT(*msix, *msix);
	ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
	if (ret)
		return ret;

	if (dynamic_msix)
		*msix = id;

	return 0;
}

static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
{
	xa_erase(&xe->irq.msix.indexes, msix);
}

static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
					    void *irq_buf, const char *name, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int ret, irq;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
	if (ret < 0)
		return ret;

	return 0;
}

int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
			    const char *name, bool dynamic_msix, u16 *msix)
{
	int ret;

	ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
	if (ret)
		return ret;

	ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
	if (ret) {
		drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
		xe_irq_msix_release_vector(xe, *msix);
		return ret;
	}

	return 0;
}

void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;
	void *irq_buf;

	irq_buf = xa_load(&xe->irq.msix.indexes, msix);
	if (!irq_buf)
		return;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0) {
		drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
		return;
	}

	free_irq(irq, irq_buf);
	xe_irq_msix_release_vector(xe, msix);
}

static int xe_irq_msix_request_irqs(struct xe_device *xe)
{
	int err;
	u16 msix;

	msix = GUC2HOST_MSIX;
	err = xe_irq_msix_request_irq(xe, xe_irq_handler(xe), xe,
				      DRIVER_NAME "-guc2host", false, &msix);
	if (err)
		return err;

	msix = DEFAULT_MSIX;
	err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
				      DRIVER_NAME "-default-msix", false, &msix);
	if (err) {
		xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
		return err;
	}

	return 0;
}

static void xe_irq_msix_free(struct xe_device *xe)
{
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		xe_irq_msix_free_irq(xe, msix);
	xa_destroy(&xe->irq.msix.indexes);
}

static void xe_irq_msix_synchronize_irq(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		synchronize_irq(pci_irq_vector(pdev, msix));
}