// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_irq.h"

#include <linux/sched/clock.h>

#include <drm/drm_managed.h>

#include "display/xe_display.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_sriov.h"

/*
 * Interrupt registers for a unit are always consecutive and ordered
 * ISR, IMR, IIR, IER.
 */
#define IMR(offset)				XE_REG(offset + 0x4)
#define IIR(offset)				XE_REG(offset + 0x8)
#define IER(offset)				XE_REG(offset + 0xc)

static int xe_irq_msix_init(struct xe_device *xe);
static void xe_irq_msix_free(struct xe_device *xe);
static int xe_irq_msix_request_irqs(struct xe_device *xe);
static void xe_irq_msix_synchronize_irq(struct xe_device *xe);

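/*
 * Warn if an IIR that is about to be enabled already has bits latched, then
 * attempt to clear it. The double write + posting read mirrors the "IIR can
 * queue up two events" paranoia used elsewhere in this file.
 */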
static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 val = xe_mmio_read32(mmio, reg);

	if (val == 0)
		return;

	drm_WARN(&mmio->tile->xe->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 reg.addr, val);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
}

/*
 * Unmask and enable the specified interrupts.  Does not check current state,
 * so any bits not specified here will become masked and disabled.
 */
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
	struct xe_mmio *mmio = &tile->mmio;

	/*
	 * If we're just enabling an interrupt now, it shouldn't already
	 * be raised in the IIR.
	 */
	assert_iir_is_zero(mmio, IIR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), bits);
	xe_mmio_write32(mmio, IMR(irqregs), ~bits);

	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));
}

/* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, IMR(irqregs), ~0);
	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
}

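/* Disable the "graphics master" interrupt and return its latched status. */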
static u32 xelp_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);

	/*
	 * With the master disabled, take a sample of the level indications
	 * for this interrupt. Indications are cleared by the related acks.
	 * New indications can and will light up during processing, and will
	 * generate a new interrupt once the master is re-enabled.
	 */
	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

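/*
 * Ack and return the GU MISC IIR; its GSE bit is consumed by the display
 * code via xe_display_irq_enable().
 */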
static u32
gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 iir;

	if (!(master_ctl & GU_MISC_IRQ))
		return 0;

	iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
	if (likely(iir))
		xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);

	return iir;
}

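/*
 * Re-enable the "graphics master" interrupt, optionally stalling on a
 * posting read.
 */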
static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
	if (stall)
		xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

/* Enable/unmask the HWE interrupts for a specific GT's engines. */
void xe_irq_enable_hwe(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_mmio *mmio = &gt->mmio;
	u32 ccs_mask, bcs_mask;
	u32 irqs, dmask, smask;
	u32 gsc_mask = 0;
	u32 heci_mask = 0;

	if (xe_device_uses_memirq(xe))
		return;

	if (xe_device_uc_enabled(xe)) {
		irqs = GT_RENDER_USER_INTERRUPT |
			GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		irqs = GT_RENDER_USER_INTERRUPT |
		       GT_CS_MASTER_ERROR_INTERRUPT |
		       GT_CONTEXT_SWITCH_INTERRUPT |
		       GT_WAIT_SEMAPHORE_INTERRUPT;
	}

	ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
	bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);

	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	if (!xe_gt_is_media_type(gt)) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
		if (ccs_mask)
			xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
		xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
		if (bcs_mask & (BIT(1)|BIT(2)))
			xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(3)|BIT(4)))
			xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(5)|BIT(6)))
			xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(7)|BIT(8)))
			xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(0)|BIT(1)))
			xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(2)|BIT(3)))
			xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
	}

	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
		xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
		xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);

		/*
		 * The HECI2 interrupt is enabled via the same register as the
		 * GSCCS interrupts, but it has its own mask register.
		 */
		if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
			gsc_mask = irqs | GSC_ER_COMPLETE;
			heci_mask = GSC_IRQ_INTF(1);
		} else if (xe->info.has_heci_gscfi) {
			gsc_mask = GSC_IRQ_INTF(1);
		}

		if (gsc_mask) {
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
		}
		if (heci_mask)
			xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
	}
}

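/*
 * Select the given bit in the bank's IIR selector register, then spin until
 * the identity register reports valid data (or ~100us elapses). Returns the
 * identity dword, or 0 on timeout.
 */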
static u32
gt_engine_identity(struct xe_device *xe,
		   struct xe_mmio *mmio,
		   const unsigned int bank,
		   const unsigned int bit)
{
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&xe->irq.lock);

	xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
	} while (!(ident & INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & INTR_DATA_VALID))) {
		drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			bank, bit, ident);
		return 0;
	}

	xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);

	return ident;
}

#define   OTHER_MEDIA_GUC_INSTANCE           16

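/*
 * Route "other" class interrupts (GuC, media GuC, GSC proxy) to their
 * respective handlers.
 */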
static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
		return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);

	if (instance != OTHER_GUC_INSTANCE &&
	    instance != OTHER_MEDIA_GUC_INSTANCE) {
		WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
			  instance, iir);
	}
}

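/*
 * Map an engine class/instance reported by the hardware to the GT that owns
 * it: media engines (and media-related "other" instances) live on the media
 * GT on MEDIA_VER >= 13; everything else belongs to the primary GT.
 */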
static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
				    enum xe_engine_class class,
				    unsigned int instance)
{
	struct xe_device *xe = tile_to_xe(tile);

	if (MEDIA_VER(xe) < 13)
		return tile->primary_gt;

	switch (class) {
	case XE_ENGINE_CLASS_VIDEO_DECODE:
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return tile->media_gt;
	case XE_ENGINE_CLASS_OTHER:
		switch (instance) {
		case OTHER_MEDIA_GUC_INSTANCE:
		case OTHER_GSC_INSTANCE:
		case OTHER_GSC_HECI2_INSTANCE:
			return tile->media_gt;
		default:
			break;
		}
		fallthrough;
	default:
		return tile->primary_gt;
	}
}

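/*
 * Walk both GT interrupt banks: latch each bank's pending bits, resolve every
 * set bit to an engine identity, ack the bank, then dispatch each identity to
 * the matching hardware engine (or to the GuC/GSC/HECI "other" handlers).
 */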
static void gt_irq_handler(struct xe_tile *tile,
			   u32 master_ctl, unsigned long *intr_dw,
			   u32 *identity)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mmio *mmio = &tile->mmio;
	unsigned int bank, bit;
	u16 instance, intr_vec;
	enum xe_engine_class class;
	struct xe_hw_engine *hwe;

	spin_lock(&xe->irq.lock);

	for (bank = 0; bank < 2; bank++) {
		if (!(master_ctl & GT_DW_IRQ(bank)))
			continue;

		intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
		for_each_set_bit(bit, intr_dw + bank, 32)
			identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
		xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);

		for_each_set_bit(bit, intr_dw + bank, 32) {
			struct xe_gt *engine_gt;

			class = INTR_ENGINE_CLASS(identity[bit]);
			instance = INTR_ENGINE_INSTANCE(identity[bit]);
			intr_vec = INTR_ENGINE_INTR(identity[bit]);

			engine_gt = pick_engine_gt(tile, class, instance);

			hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
			if (hwe) {
				xe_hw_engine_handle_irq(hwe, intr_vec);
				continue;
			}

			if (class == XE_ENGINE_CLASS_OTHER) {
				/* HECI GSCFI interrupts come from outside of GT */
				if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
					xe_heci_gsc_irq_handler(xe, intr_vec);
				else
					gt_other_irq_handler(engine_gt, instance, intr_vec);
			}
		}
	}

	spin_unlock(&xe->irq.lock);
}

/*
 * Top-level interrupt handler for Xe_LP platforms (which do not have
 * a "master tile" interrupt register).
 */
static irqreturn_t xelp_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	u32 master_ctl, gu_misc_iir;
	unsigned long intr_dw[2];
	u32 identity[32];

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_ctl = xelp_intr_disable(xe);
	if (!master_ctl) {
		xelp_intr_enable(xe, false);
		return IRQ_NONE;
	}

	gt_irq_handler(tile, master_ctl, intr_dw, identity);

	xe_display_irq_handler(xe, master_ctl);

	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);

	xelp_intr_enable(xe, false);

	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

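/* Disable the "master tile" interrupt, then latch and ack its status bits. */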
static u32 dg1_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 val;

	/* First disable interrupts */
	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);

	return val;
}

static void dg1_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
	if (stall)
		xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
}

/*
 * Top-level interrupt handler for Xe_LP+ and beyond.  These platforms have
 * a "master tile" interrupt register which must be consulted before the
 * "graphics master" interrupt register.
 */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
	unsigned long intr_dw[2];
	u32 identity[32];
	u8 id;

	/* TODO: This really shouldn't be copied+pasted */

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_tile_ctl = dg1_intr_disable(xe);
	if (!master_tile_ctl) {
		dg1_intr_enable(xe, false);
		return IRQ_NONE;
	}

	for_each_tile(tile, xe, id) {
		struct xe_mmio *mmio = &tile->mmio;

		if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
			continue;

		master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);

		/*
		 * We might be in the IRQ handler just as PCIe DPC is
		 * initiated, in which case all MMIO reads return all 1's.
		 * Ignore this IRQ, as the device is inaccessible.
		 */
		if (master_ctl == REG_GENMASK(31, 0)) {
			drm_dbg(&tile_to_xe(tile)->drm,
				"Ignore this IRQ as device might be in DPC containment.\n");
			return IRQ_HANDLED;
		}

		xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);

		gt_irq_handler(tile, master_ctl, intr_dw, identity);

		/*
		 * Display interrupts (including display backlight operations
		 * that get reported as Gunit GSE) would only be hooked up to
		 * the primary tile.
		 */
		if (id == 0) {
			if (xe->info.has_heci_cscfi)
				xe_heci_csc_irq_handler(xe, master_ctl);
			xe_display_irq_handler(xe, master_ctl);
			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
		}
	}

	dg1_intr_enable(xe, false);
	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

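/* Disable and mask all GT-level interrupt sources on a tile. */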
static void gt_irq_reset(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;

	u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COMPUTE);
	u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COPY);

	/* Disable RCS, BCS, VCS and VECS class engines. */
	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
	if (ccs_mask)
		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);

	/* Restore masked IRQs on RCS, BCS, VCS and VECS engines. */
	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK,	~0);
	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK,	~0);
	if (bcs_mask & (BIT(1)|BIT(2)))
		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
	if (bcs_mask & (BIT(3)|BIT(4)))
		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
	if (bcs_mask & (BIT(5)|BIT(6)))
		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
	if (bcs_mask & (BIT(7)|BIT(8)))
		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK,	~0);
	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK,	~0);
	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK,	~0);
	if (ccs_mask & (BIT(0)|BIT(1)))
		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(2)|BIT(3)))
		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);

	if ((tile->media_gt &&
	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
	    tile_to_xe(tile)->info.has_heci_gscfi) {
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
		xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
	}

	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK,  ~0);
	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE,	 0);
	xe_mmio_write32(mmio, GUC_SG_INTR_MASK,		~0);
}

static void xelp_irq_reset(struct xe_tile *tile)
{
	xelp_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset(struct xe_tile *tile)
{
	if (tile->id == 0)
		dg1_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

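/* Ack any leftover bits in the tile's top-level "graphics master" register. */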
static void dg1_irq_reset_mstr(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}

static void vf_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	xe_assert(xe, IS_SRIOV_VF(xe));

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_disable(xe);
	else
		xe_assert(xe, xe_device_has_memirq(xe));

	for_each_tile(tile, xe, id) {
		if (xe_device_has_memirq(xe))
			xe_memirq_reset(&tile->memirq);
		else
			gt_irq_reset(tile);
	}
}

static void xe_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	u8 id;

	if (IS_SRIOV_VF(xe))
		return vf_irq_reset(xe);

	if (xe_device_uses_memirq(xe)) {
		for_each_tile(tile, xe, id)
			xe_memirq_reset(&tile->memirq);
	}

	for_each_tile(tile, xe, id) {
		if (GRAPHICS_VERx100(xe) >= 1210)
			dg1_irq_reset(tile);
		else
			xelp_irq_reset(tile);
	}

	tile = xe_device_get_root_tile(xe);
	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
	xe_display_irq_reset(xe);

	/*
	 * The tile's top-level status register should be the last one
	 * to be reset to avoid possible bit re-latching from lower
	 * level interrupts.
	 */
	if (GRAPHICS_VERx100(xe) >= 1210) {
		for_each_tile(tile, xe, id)
			dg1_irq_reset_mstr(tile);
	}
}

static void vf_irq_postinstall(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	for_each_tile(tile, xe, id)
		if (xe_device_has_memirq(xe))
			xe_memirq_postinstall(&tile->memirq);

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_enable(xe, true);
	else
		xe_assert(xe, xe_device_has_memirq(xe));
}

static void xe_irq_postinstall(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe))
		return vf_irq_postinstall(xe);

	if (xe_device_uses_memirq(xe)) {
		struct xe_tile *tile;
		unsigned int id;

		for_each_tile(tile, xe, id)
			xe_memirq_postinstall(&tile->memirq);
	}

	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));

	/*
	 * ASLE backlight operations are reported via GUnit GSE interrupts
	 * on the root tile.
	 */
	unmask_and_enable(xe_device_get_root_tile(xe),
			  GU_MISC_IRQ_OFFSET, GU_MISC_GSE);

	/* Enable top-level interrupts */
	if (GRAPHICS_VERx100(xe) >= 1210)
		dg1_intr_enable(xe, true);
	else
		xelp_intr_enable(xe, true);
}

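/*
 * Interrupt handler for VFs using memory-based interrupts: dispatch each
 * tile's memirq status.
 */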
static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	unsigned int id;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, id)
		xe_memirq_handler(&tile->memirq);

	return IRQ_HANDLED;
}

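/*
 * Pick the top-level IRQ handler that matches the platform and
 * virtualization mode.
 */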
static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
		return vf_mem_irq_handler;

	if (GRAPHICS_VERx100(xe) >= 1210)
		return dg1_irq_handler;
	else
		return xelp_irq_handler;
}

static int xe_irq_msi_request_irqs(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	irq_handler_t irq_handler;
	int irq, err;

	irq_handler = xe_irq_handler(xe);
	if (!irq_handler) {
		drm_err(&xe->drm, "No supported interrupt handler");
		return -EINVAL;
	}

	irq = pci_irq_vector(pdev, 0);
	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
		return err;
	}

	return 0;
}

static void xe_irq_msi_free(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;

	irq = pci_irq_vector(pdev, 0);
	free_irq(irq, xe);
}

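/* devm action: disable, reset and free all IRQs when the device goes away. */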
static void irq_uninstall(void *arg)
{
	struct xe_device *xe = arg;

	if (!atomic_xchg(&xe->irq.enabled, 0))
		return;

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe))
		xe_irq_msix_free(xe);
	else
		xe_irq_msi_free(xe);
}

int xe_irq_init(struct xe_device *xe)
{
	spin_lock_init(&xe->irq.lock);

	return xe_irq_msix_init(xe);
}

int xe_irq_install(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int irq_flags = PCI_IRQ_MSI;
	int nvec = 1;
	int err;

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe)) {
		nvec = xe->irq.msix.nvec;
		irq_flags = PCI_IRQ_MSIX;
	}

	err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
		return err;
	}

	err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
					xe_irq_msi_request_irqs(xe);
	if (err)
		return err;

	atomic_set(&xe->irq.enabled, 1);

	xe_irq_postinstall(xe);

	err = devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
	if (err)
		goto free_irq_handler;

	return 0;

free_irq_handler:
	if (xe_device_has_msix(xe))
		xe_irq_msix_free(xe);
	else
		xe_irq_msi_free(xe);

	return err;
}

static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
{
	synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
}

void xe_irq_suspend(struct xe_device *xe)
{
	atomic_set(&xe->irq.enabled, 0); /* no new irqs */

	/* flush irqs */
	if (xe_device_has_msix(xe))
		xe_irq_msix_synchronize_irq(xe);
	else
		xe_irq_msi_synchronize_irq(xe);
	xe_irq_reset(xe); /* turn irqs off */
}

void xe_irq_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	/*
	 * lock not needed:
	 * 1. no irq will arrive before the postinstall
	 * 2. display is not yet resumed
	 */
	atomic_set(&xe->irq.enabled, 1);
	xe_irq_reset(xe);
	xe_irq_postinstall(xe); /* turn irqs on */

	for_each_gt(gt, xe, id)
		xe_irq_enable_hwe(gt);
}

/* MSI-X related definitions and functions below. */

enum xe_irq_msix_static {
	GUC2HOST_MSIX = 0,
	DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
	/* Must be last */
	NUM_OF_STATIC_MSIX,
};

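/*
 * Probe the MSI-X vector count; -EINVAL from pci_msix_vec_count() means the
 * device only supports MSI, which is not an error here.
 */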
static int xe_irq_msix_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int nvec = pci_msix_vec_count(pdev);

	if (nvec == -EINVAL)
		return 0;  /* MSI */

	if (nvec < 0) {
		drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
		return nvec;
	}

	xe->irq.msix.nvec = nvec;
	xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
	return 0;
}

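/* Handler for the static GuC-to-host MSI-X vector: notify each tile's GuC. */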
static irqreturn_t guc2host_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u8 id;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, id)
		xe_guc_irq_handler(&tile->primary_gt->uc.guc,
				   GUC_INTR_GUC2HOST);

	return IRQ_HANDLED;
}

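/*
 * Handler for the default MSI-X vector: scan every hardware engine of every
 * GT whose tile has a memirq buffer and service its memory-based interrupts.
 */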
static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
{
	unsigned int tile_id, gt_id;
	struct xe_device *xe = arg;
	struct xe_memirq *memirq;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_tile *tile;
	struct xe_gt *gt;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, tile_id) {
		memirq = &tile->memirq;
		if (!memirq->bo)
			continue;

		for_each_gt(gt, xe, gt_id) {
			if (gt->tile != tile)
				continue;

			for_each_hw_engine(hwe, gt, id)
				xe_memirq_hwe_handler(memirq, hwe);
		}
	}

	return IRQ_HANDLED;
}

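/*
 * Reserve an MSI-X index in the xarray: any free index above the static ones
 * when dynamic_msix is set, otherwise exactly the index passed in via *msix.
 */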
static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
				    bool dynamic_msix, u16 *msix)
{
	struct xa_limit limit;
	int ret;
	u32 id;

	limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
				 XA_LIMIT(*msix, *msix);
	ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
	if (ret)
		return ret;

	if (dynamic_msix)
		*msix = id;

	return 0;
}

static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
{
	xa_erase(&xe->irq.msix.indexes, msix);
}

static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
					    void *irq_buf, const char *name, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int ret, irq;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
	if (ret < 0)
		return ret;

	return 0;
}

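/*
 * Allocate an MSI-X vector (static or dynamic) and request its IRQ; on
 * failure the reserved index is released again.
 */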
int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
			    const char *name, bool dynamic_msix, u16 *msix)
{
	int ret;

	ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
	if (ret)
		return ret;

	ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
	if (ret) {
		drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
		xe_irq_msix_release_vector(xe, *msix);
		return ret;
	}

	return 0;
}

void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;
	void *irq_buf;

	irq_buf = xa_load(&xe->irq.msix.indexes, msix);
	if (!irq_buf)
		return;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0) {
		drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
		return;
	}

	free_irq(irq, irq_buf);
	xe_irq_msix_release_vector(xe, msix);
}

int xe_irq_msix_request_irqs(struct xe_device *xe)
{
	int err;
	u16 msix;

	msix = GUC2HOST_MSIX;
	err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
				      DRIVER_NAME "-guc2host", false, &msix);
	if (err)
		return err;

	msix = DEFAULT_MSIX;
	err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
				      DRIVER_NAME "-default-msix", false, &msix);
	if (err) {
		xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
		return err;
	}

	return 0;
}

void xe_irq_msix_free(struct xe_device *xe)
{
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		xe_irq_msix_free_irq(xe, msix);
	xa_destroy(&xe->irq.msix.indexes);
}

void xe_irq_msix_synchronize_irq(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		synchronize_irq(pci_irq_vector(pdev, msix));
}
1004