// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_irq.h"

#include <linux/sched/clock.h>

#include <drm/drm_managed.h>

#include "display/xe_display.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_hw_error.h"
#include "xe_i2c.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_pxp.h"
#include "xe_sriov.h"
#include "xe_tile.h"

/*
 * Interrupt registers for a unit are always consecutive and ordered
 * ISR, IMR, IIR, IER.
 */
#define IMR(offset)				XE_REG(offset + 0x4)
#define IIR(offset)				XE_REG(offset + 0x8)
#define IER(offset)				XE_REG(offset + 0xc)

static int xe_irq_msix_init(struct xe_device *xe);
static void xe_irq_msix_free(struct xe_device *xe);
static int xe_irq_msix_request_irqs(struct xe_device *xe);
static void xe_irq_msix_synchronize_irq(struct xe_device *xe);

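/*
 * Sanity check that a unit's IIR is clear before it gets enabled.  On
 * failure, warn and then clear the register twice, since the IIR can
 * queue up a second event behind the one currently latched.
 */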
static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 val = xe_mmio_read32(mmio, reg);

	if (val == 0)
		return;

	drm_WARN(&mmio->tile->xe->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 reg.addr, val);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
}

/*
 * Unmask and enable the specified interrupts.  Does not check current state,
 * so any bits not specified here will become masked and disabled.
 */
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
	struct xe_mmio *mmio = &tile->mmio;

	/*
	 * If we're just enabling an interrupt now, it shouldn't already
	 * be raised in the IIR.
	 */
	assert_iir_is_zero(mmio, IIR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), bits);
	xe_mmio_write32(mmio, IMR(irqregs), ~bits);

	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));
}

/* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, IMR(irqregs), ~0);
	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
}

static u32 xelp_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);

	/*
	 * Now with the master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing, and
	 * will generate a new interrupt after the master is re-enabled.
	 */
	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

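/*
 * Ack any pending GU MISC interrupt and return its IIR value so that the
 * display code can act on it (e.g. GSE events) once the master interrupt
 * has been re-enabled.
 */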
static u32
gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 iir;

	if (!(master_ctl & GU_MISC_IRQ))
		return 0;

	iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
	if (likely(iir))
		xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);

	return iir;
}

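/* Re-enable the graphics master interrupt; if @stall, a posting read flushes the write. */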
static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
	if (stall)
		xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

/* Enable/unmask the HWE interrupts for a specific GT's engines. */
void xe_irq_enable_hwe(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_mmio *mmio = &gt->mmio;
	u32 common_mask, val, gsc_mask = 0, heci_mask = 0,
	    rcs_mask = 0, bcs_mask = 0, vcs_mask = 0, vecs_mask = 0,
	    ccs_mask = 0;

	if (xe_device_uses_memirq(xe))
		return;

	if (xe_device_uc_enabled(xe)) {
		common_mask = GT_MI_USER_INTERRUPT |
			      GT_FLUSH_COMPLETE_INTERRUPT;

		/* Enable Compute Walker Interrupt for non-MSIX platforms */
		if (GRAPHICS_VERx100(xe) >= 3511 && !xe_device_has_msix(xe)) {
			rcs_mask |= GT_COMPUTE_WALKER_INTERRUPT;
			ccs_mask |= GT_COMPUTE_WALKER_INTERRUPT;
		}
	} else {
		common_mask = GT_MI_USER_INTERRUPT |
			      GT_CS_MASTER_ERROR_INTERRUPT |
			      GT_CONTEXT_SWITCH_INTERRUPT |
			      GT_WAIT_SEMAPHORE_INTERRUPT;
	}

	rcs_mask |= common_mask;
	bcs_mask |= common_mask;
	vcs_mask |= common_mask;
	vecs_mask |= common_mask;
	ccs_mask |= common_mask;

	if (xe_gt_is_main_type(gt)) {
		/*
		 * For enabling the interrupts, the information about fused-off
		 * engines doesn't matter much, but it also allows us to check
		 * whether the engine is architecturally available on the
		 * platform.
		 */
		u32 ccs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
		u32 bcs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);

		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE,
				REG_FIELD_PREP(ENGINE1_MASK, rcs_mask) |
				REG_FIELD_PREP(ENGINE0_MASK, bcs_mask));
		if (ccs_fuse_mask)
			xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE,
					REG_FIELD_PREP(ENGINE1_MASK, ccs_mask));

		/* Unmask interrupts for each engine instance */
		val = ~REG_FIELD_PREP(ENGINE1_MASK, rcs_mask);
		xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, val);
		val = ~REG_FIELD_PREP(ENGINE1_MASK, bcs_mask);
		xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, val);

		val = ~(REG_FIELD_PREP(ENGINE1_MASK, bcs_mask) |
			REG_FIELD_PREP(ENGINE0_MASK, bcs_mask));
		if (bcs_fuse_mask & (BIT(1)|BIT(2)))
			xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, val);
		if (bcs_fuse_mask & (BIT(3)|BIT(4)))
			xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, val);
		if (bcs_fuse_mask & (BIT(5)|BIT(6)))
			xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, val);
		if (bcs_fuse_mask & (BIT(7)|BIT(8)))
			xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, val);

		val = ~(REG_FIELD_PREP(ENGINE1_MASK, ccs_mask) |
			REG_FIELD_PREP(ENGINE0_MASK, ccs_mask));
		if (ccs_fuse_mask & (BIT(0)|BIT(1)))
			xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, val);
		if (ccs_fuse_mask & (BIT(2)|BIT(3)))
			xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, val);
	}

	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
		u32 vcs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
		u32 vecs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
		u32 other_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER);

		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE,
				REG_FIELD_PREP(ENGINE1_MASK, vcs_mask) |
				REG_FIELD_PREP(ENGINE0_MASK, vecs_mask));

		/* Unmask interrupts for each engine instance */
		val = ~(REG_FIELD_PREP(ENGINE1_MASK, vcs_mask) |
			REG_FIELD_PREP(ENGINE0_MASK, vcs_mask));
		if (vcs_fuse_mask & (BIT(0) | BIT(1)))
			xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, val);
		if (vcs_fuse_mask & (BIT(2) | BIT(3)))
			xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, val);
		if (vcs_fuse_mask & (BIT(4) | BIT(5)))
			xe_mmio_write32(mmio, VCS4_VCS5_INTR_MASK, val);
		if (vcs_fuse_mask & (BIT(6) | BIT(7)))
			xe_mmio_write32(mmio, VCS6_VCS7_INTR_MASK, val);

		val = ~(REG_FIELD_PREP(ENGINE1_MASK, vecs_mask) |
			REG_FIELD_PREP(ENGINE0_MASK, vecs_mask));
		if (vecs_fuse_mask & (BIT(0) | BIT(1)))
			xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, val);
		if (vecs_fuse_mask & (BIT(2) | BIT(3)))
			xe_mmio_write32(mmio, VECS2_VECS3_INTR_MASK, val);

		/*
		 * The HECI2 interrupt is enabled via the same register as the
		 * GSCCS interrupts, but it has its own mask register.
		 */
		if (other_fuse_mask) {
			gsc_mask = common_mask | GSC_ER_COMPLETE;
			heci_mask = GSC_IRQ_INTF(1);
		} else if (xe->info.has_heci_gscfi) {
			gsc_mask = GSC_IRQ_INTF(1);
		}

		if (gsc_mask) {
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
		}
		if (heci_mask)
			xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));

		if (xe_pxp_is_supported(xe)) {
			u32 kcr_mask = KCR_PXP_STATE_TERMINATED_INTERRUPT |
				       KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT |
				       KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT;

			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, kcr_mask << 16);
			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~(kcr_mask << 16));
		}
	}
}

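/*
 * Read the identity of a pending GT interrupt: select the bank/bit via
 * IIR_REG_SELECTOR, spin until the hardware marks the identity register
 * valid, then consume (clear) it.  Returns 0 if the identity never became
 * valid.
 */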
static u32
gt_engine_identity(struct xe_device *xe,
		   struct xe_mmio *mmio,
		   const unsigned int bank,
		   const unsigned int bit)
{
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&xe->irq.lock);

	xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
	} while (!(ident & INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & INTR_DATA_VALID))) {
		drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			bank, bit, ident);
		return 0;
	}

	xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);

	return ident;
}

#define   OTHER_MEDIA_GUC_INSTANCE           16

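/*
 * Dispatch OTHER-class interrupts that have no hw_engine backing them:
 * GuC notifications (on the primary or media GT) and GSC proxy events.
 */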
static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE && xe_gt_is_main_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
		return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);

	if (instance != OTHER_GUC_INSTANCE &&
	    instance != OTHER_MEDIA_GUC_INSTANCE) {
		WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
			  instance, iir);
	}
}

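/*
 * Map an engine class/instance reported in the interrupt identity to the
 * GT that owns it.  Before media version 13 there is no standalone media
 * GT, so everything belongs to the primary GT.
 */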
static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
				    enum xe_engine_class class,
				    unsigned int instance)
{
	struct xe_device *xe = tile_to_xe(tile);

	if (MEDIA_VER(xe) < 13)
		return tile->primary_gt;

	switch (class) {
	case XE_ENGINE_CLASS_VIDEO_DECODE:
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return tile->media_gt;
	case XE_ENGINE_CLASS_OTHER:
		switch (instance) {
		case OTHER_MEDIA_GUC_INSTANCE:
		case OTHER_GSC_INSTANCE:
		case OTHER_GSC_HECI2_INSTANCE:
			return tile->media_gt;
		default:
			break;
		}
		fallthrough;
	default:
		return tile->primary_gt;
	}
}

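/*
 * Walk both GT interrupt banks: latch the raw status, resolve every set
 * bit to an engine identity, ack the bank, and then route each identity
 * to its hw_engine handler or to the OTHER-class handlers.
 */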
static void gt_irq_handler(struct xe_tile *tile,
			   u32 master_ctl, unsigned long *intr_dw,
			   u32 *identity)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mmio *mmio = &tile->mmio;
	unsigned int bank, bit;
	u16 instance, intr_vec;
	enum xe_engine_class class;
	struct xe_hw_engine *hwe;

	spin_lock(&xe->irq.lock);

	for (bank = 0; bank < 2; bank++) {
		if (!(master_ctl & GT_DW_IRQ(bank)))
			continue;

		intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
		for_each_set_bit(bit, intr_dw + bank, 32)
			identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
		xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);

		for_each_set_bit(bit, intr_dw + bank, 32) {
			struct xe_gt *engine_gt;

			class = INTR_ENGINE_CLASS(identity[bit]);
			instance = INTR_ENGINE_INSTANCE(identity[bit]);
			intr_vec = INTR_ENGINE_INTR(identity[bit]);

			engine_gt = pick_engine_gt(tile, class, instance);

			hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
			if (hwe) {
				xe_hw_engine_handle_irq(hwe, intr_vec);
				continue;
			}

			if (class == XE_ENGINE_CLASS_OTHER) {
				/*
				 * HECI GSCFI interrupts come from outside of GT.
				 * KCR irqs come from inside GT but are handled
				 * by the global PXP subsystem.
				 */
				if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
					xe_heci_gsc_irq_handler(xe, intr_vec);
				else if (instance == OTHER_KCR_INSTANCE)
					xe_pxp_irq_handler(xe, intr_vec);
				else
					gt_other_irq_handler(engine_gt, instance, intr_vec);
			}
		}
	}

	spin_unlock(&xe->irq.lock);
}

/*
 * Top-level interrupt handler for Xe_LP platforms (which did not have
 * a "master tile" interrupt register).
 */
static irqreturn_t xelp_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	u32 master_ctl, gu_misc_iir;
	unsigned long intr_dw[2];
	u32 identity[32];

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_ctl = xelp_intr_disable(xe);
	if (!master_ctl) {
		xelp_intr_enable(xe, false);
		return IRQ_NONE;
	}

	gt_irq_handler(tile, master_ctl, intr_dw, identity);

	xe_display_irq_handler(xe, master_ctl);

	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);

	xelp_intr_enable(xe, false);

	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

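/*
 * Disable the top-level tile interrupt, then return (and ack) the per-tile
 * indication bits that were pending so the caller knows which tiles to
 * service.
 */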
static u32 dg1_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 val;

	/* First disable interrupts */
	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);

	return val;
}

static void dg1_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
	if (stall)
		xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
}

/*
 * Top-level interrupt handler for Xe_LP+ and beyond.  These platforms have
 * a "master tile" interrupt register which must be consulted before the
 * "graphics master" interrupt register.
 */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
	unsigned long intr_dw[2];
	u32 identity[32];
	u8 id;

	/* TODO: This really shouldn't be copied+pasted */

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_tile_ctl = dg1_intr_disable(xe);
	if (!master_tile_ctl) {
		dg1_intr_enable(xe, false);
		return IRQ_NONE;
	}

	for_each_tile(tile, xe, id) {
		struct xe_mmio *mmio = &tile->mmio;

		if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
			continue;

		master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);

		/*
		 * We might be in the IRQ handler just as PCIe DPC is
		 * initiated, in which case all MMIO reads return all 1's.
		 * Ignore this IRQ, as the device is inaccessible.
		 */
		if (master_ctl == REG_GENMASK(31, 0)) {
			drm_dbg(&tile_to_xe(tile)->drm,
				"Ignore this IRQ as device might be in DPC containment.\n");
			return IRQ_HANDLED;
		}

		xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);

		gt_irq_handler(tile, master_ctl, intr_dw, identity);
		xe_hw_error_irq_handler(tile, master_ctl);

		/*
		 * Display interrupts (including display backlight operations
		 * that get reported as Gunit GSE) are only hooked up to the
		 * primary tile.
		 */
		if (id == 0) {
			if (xe->info.has_heci_cscfi)
				xe_heci_csc_irq_handler(xe, master_ctl);
			xe_display_irq_handler(xe, master_ctl);
			xe_i2c_irq_handler(xe, master_ctl);
			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
		}
	}

	dg1_intr_enable(xe, false);
	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

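/*
 * Disable and mask all engine interrupt registers on a tile.  The fuse
 * masks are only used to skip the instance-pair registers (XEHPC BCS, CCS)
 * for engine instances that are not present.
 */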
static void gt_irq_reset(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;
	u32 ccs_mask = ~0;
	u32 bcs_mask = ~0;

	if (tile->primary_gt) {
		ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						       XE_ENGINE_CLASS_COMPUTE);
		bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						       XE_ENGINE_CLASS_COPY);
	}

	/* Disable RCS, BCS, VCS and VECS class engines. */
	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
	if (ccs_mask)
		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);

	/* Restore interrupt masks on RCS, BCS, VCS and VECS engines. */
	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK,	~0);
	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK,	~0);
	if (bcs_mask & (BIT(1)|BIT(2)))
		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
	if (bcs_mask & (BIT(3)|BIT(4)))
		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
	if (bcs_mask & (BIT(5)|BIT(6)))
		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
	if (bcs_mask & (BIT(7)|BIT(8)))
		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK,	~0);
	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK,	~0);
	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK,	~0);
	if (ccs_mask & (BIT(0)|BIT(1)))
		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(2)|BIT(3)))
		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);

	if ((tile->media_gt &&
	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
	    tile_to_xe(tile)->info.has_heci_gscfi) {
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
		xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~0);
	}

	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK,  ~0);
	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE,	 0);
	xe_mmio_write32(mmio, GUC_SG_INTR_MASK,		~0);
}

static void xelp_irq_reset(struct xe_tile *tile)
{
	xelp_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset(struct xe_tile *tile)
{
	if (xe_tile_is_root(tile))
		dg1_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

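/* Clear the tile's top-level status last to avoid re-latching; see xe_irq_reset(). */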
static void dg1_irq_reset_mstr(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}

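/*
 * VF path: interrupts are either memory-based (memirq) or, on older
 * platforms, delivered through the regular GT banks; PF-only registers
 * such as the PCU block are never touched here.
 */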
static void vf_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	xe_assert(xe, IS_SRIOV_VF(xe));

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_disable(xe);
	else
		xe_assert(xe, xe_device_has_memirq(xe));

	for_each_tile(tile, xe, id) {
		if (xe_device_has_memirq(xe))
			xe_memirq_reset(&tile->memirq);
		else
			gt_irq_reset(tile);
	}
}

static void xe_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	u8 id;

	if (IS_SRIOV_VF(xe))
		return vf_irq_reset(xe);

	if (xe_device_uses_memirq(xe)) {
		for_each_tile(tile, xe, id)
			xe_memirq_reset(&tile->memirq);
	}

	for_each_tile(tile, xe, id) {
		if (GRAPHICS_VERx100(xe) >= 1210)
			dg1_irq_reset(tile);
		else
			xelp_irq_reset(tile);
	}

	tile = xe_device_get_root_tile(xe);
	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
	xe_display_irq_reset(xe);
	xe_i2c_irq_reset(xe);

	/*
	 * The tile's top-level status register should be the last one
	 * to be reset to avoid possible bit re-latching from lower
	 * level interrupts.
	 */
	if (GRAPHICS_VERx100(xe) >= 1210) {
		for_each_tile(tile, xe, id)
			dg1_irq_reset_mstr(tile);
	}
}

static void vf_irq_postinstall(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	for_each_tile(tile, xe, id)
		if (xe_device_has_memirq(xe))
			xe_memirq_postinstall(&tile->memirq);

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_enable(xe, true);
	else
		xe_assert(xe, xe_device_has_memirq(xe));
}

static void xe_irq_postinstall(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe))
		return vf_irq_postinstall(xe);

	if (xe_device_uses_memirq(xe)) {
		struct xe_tile *tile;
		unsigned int id;

		for_each_tile(tile, xe, id)
			xe_memirq_postinstall(&tile->memirq);
	}

	xe_display_irq_postinstall(xe);
	xe_i2c_irq_postinstall(xe);

	/*
	 * ASLE backlight operations are reported via GUnit GSE interrupts
	 * on the root tile.
	 */
	unmask_and_enable(xe_device_get_root_tile(xe),
			  GU_MISC_IRQ_OFFSET, GU_MISC_GSE);

	/* Enable top-level interrupts */
	if (GRAPHICS_VERx100(xe) >= 1210)
		dg1_intr_enable(xe, true);
	else
		xelp_intr_enable(xe, true);
}

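/* Top-level handler for VFs that use memory-based interrupts. */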
static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	unsigned int id;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, id)
		xe_memirq_handler(&tile->memirq);

	return IRQ_HANDLED;
}

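/* Select the top-level interrupt handler appropriate for this device. */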
static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
		return vf_mem_irq_handler;

	if (GRAPHICS_VERx100(xe) >= 1210)
		return dg1_irq_handler;
	else
		return xelp_irq_handler;
}

static int xe_irq_msi_request_irqs(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	irq_handler_t irq_handler;
	int irq, err;

	irq_handler = xe_irq_handler(xe);
	if (!irq_handler) {
		drm_err(&xe->drm, "No supported interrupt handler");
		return -EINVAL;
	}

	irq = pci_irq_vector(pdev, 0);
	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to request MSI IRQ: %d\n", err);
		return err;
	}

	return 0;
}

static void xe_irq_msi_free(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;

	irq = pci_irq_vector(pdev, 0);
	free_irq(irq, xe);
}

static void irq_uninstall(void *arg)
{
	struct xe_device *xe = arg;

	if (!atomic_xchg(&xe->irq.enabled, 0))
		return;

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe))
		xe_irq_msix_free(xe);
	else
		xe_irq_msi_free(xe);
}

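/* One-time init of the software IRQ state: the lock and, if MSI-X is available, its bookkeeping. */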
int xe_irq_init(struct xe_device *xe)
{
	spin_lock_init(&xe->irq.lock);

	return xe_irq_msix_init(xe);
}

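/*
 * Bring up interrupt delivery: reset everything, allocate MSI or MSI-X
 * vectors, request the IRQ(s), mark interrupts enabled and run the
 * postinstall step.  Teardown is registered as a devm action.
 */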
int xe_irq_install(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int irq_flags = PCI_IRQ_MSI;
	int nvec = 1;
	int err;

	xe_hw_error_init(xe);

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe)) {
		nvec = xe->irq.msix.nvec;
		irq_flags = PCI_IRQ_MSIX;
	}

	err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
		return err;
	}

	err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
					xe_irq_msi_request_irqs(xe);
	if (err)
		return err;

	atomic_set(&xe->irq.enabled, 1);

	xe_irq_postinstall(xe);

	return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
}

static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
{
	synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
}

void xe_irq_suspend(struct xe_device *xe)
{
	atomic_set(&xe->irq.enabled, 0); /* no new irqs */

	/* flush irqs */
	if (xe_device_has_msix(xe))
		xe_irq_msix_synchronize_irq(xe);
	else
		xe_irq_msi_synchronize_irq(xe);
	xe_irq_reset(xe); /* turn irqs off */
}

void xe_irq_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	/*
	 * lock not needed:
	 * 1. no irq will arrive before the postinstall
	 * 2. display is not yet resumed
	 */
	atomic_set(&xe->irq.enabled, 1);
	xe_irq_reset(xe);
	xe_irq_postinstall(xe); /* turn irqs on */

	for_each_gt(gt, xe, id)
		xe_irq_enable_hwe(gt);
}

/* MSI-X related definitions and functions below. */

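/*
 * Statically assigned MSI-X vectors; dynamically allocated vectors start
 * at NUM_OF_STATIC_MSIX.
 */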
enum xe_irq_msix_static {
	GUC2HOST_MSIX = 0,
	DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
	/* Must be last */
	NUM_OF_STATIC_MSIX,
};

static int xe_irq_msix_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int nvec = pci_msix_vec_count(pdev);

	if (nvec == -EINVAL)
		return 0;  /* MSI */

	if (nvec < 0) {
		drm_err(&xe->drm, "Failed to get MSI-X vector count: %d\n", nvec);
		return nvec;
	}

	xe->irq.msix.nvec = nvec;
	xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
	return 0;
}

static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
{
	unsigned int tile_id, gt_id;
	struct xe_device *xe = arg;
	struct xe_memirq *memirq;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_tile *tile;
	struct xe_gt *gt;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, tile_id) {
		memirq = &tile->memirq;
		if (!memirq->bo)
			continue;

		for_each_gt(gt, xe, gt_id) {
			if (gt->tile != tile)
				continue;

			for_each_hw_engine(hwe, gt, id)
				xe_memirq_hwe_handler(memirq, hwe);
		}
	}

	return IRQ_HANDLED;
}

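/*
 * Reserve a slot in the MSI-X xarray: dynamic requests take any free index
 * above the static range, while static requests must get exactly *msix.
 */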
static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
				    bool dynamic_msix, u16 *msix)
{
	struct xa_limit limit;
	int ret;
	u32 id;

	limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
				 XA_LIMIT(*msix, *msix);
	ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
	if (ret)
		return ret;

	if (dynamic_msix)
		*msix = id;

	return 0;
}

static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
{
	xa_erase(&xe->irq.msix.indexes, msix);
}

static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
					    void *irq_buf, const char *name, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int ret, irq;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
	if (ret < 0)
		return ret;

	return 0;
}

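/*
 * Request an MSI-X interrupt on behalf of @irq_buf.  A hypothetical caller
 * asking for a dynamically allocated vector might look like this (sketch
 * only; my_handler/my_data are made-up names):
 *
 *	u16 msix;
 *
 *	err = xe_irq_msix_request_irq(xe, my_handler, my_data,
 *				      "xe-example", true, &msix);
 *	if (!err)
 *		... use the vector, later xe_irq_msix_free_irq(xe, msix) ...
 */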
int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
			    const char *name, bool dynamic_msix, u16 *msix)
{
	int ret;

	ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
	if (ret)
		return ret;

	ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
	if (ret) {
		drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
		xe_irq_msix_release_vector(xe, *msix);
		return ret;
	}

	return 0;
}

void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;
	void *irq_buf;

	irq_buf = xa_load(&xe->irq.msix.indexes, msix);
	if (!irq_buf)
		return;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0) {
		drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
		return;
	}

	free_irq(irq, irq_buf);
	xe_irq_msix_release_vector(xe, msix);
}

static int xe_irq_msix_request_irqs(struct xe_device *xe)
{
	int err;
	u16 msix;

	msix = GUC2HOST_MSIX;
	err = xe_irq_msix_request_irq(xe, xe_irq_handler(xe), xe,
				      DRIVER_NAME "-guc2host", false, &msix);
	if (err)
		return err;

	msix = DEFAULT_MSIX;
	err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
				      DRIVER_NAME "-default-msix", false, &msix);
	if (err) {
		xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
		return err;
	}

	return 0;
}

static void xe_irq_msix_free(struct xe_device *xe)
{
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		xe_irq_msix_free_irq(xe, msix);
	xa_destroy(&xe->irq.msix.indexes);
}

static void xe_irq_msix_synchronize_irq(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		synchronize_irq(pci_irq_vector(pdev, msix));
}