xref: /linux/drivers/gpu/drm/xe/xe_irq.c (revision d7b618bc41ee3d44c070212dff93949702ede997)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_irq.h"
7 
8 #include <linux/sched/clock.h>
9 
10 #include <drm/drm_managed.h>
11 
12 #include "display/xe_display.h"
13 #include "regs/xe_guc_regs.h"
14 #include "regs/xe_irq_regs.h"
15 #include "xe_device.h"
16 #include "xe_drv.h"
17 #include "xe_gsc_proxy.h"
18 #include "xe_gt.h"
19 #include "xe_guc.h"
20 #include "xe_hw_engine.h"
21 #include "xe_i2c.h"
22 #include "xe_memirq.h"
23 #include "xe_mmio.h"
24 #include "xe_pxp.h"
25 #include "xe_sriov.h"
26 
27 /*
28  * Interrupt registers for a unit are always consecutive and ordered
29  * ISR, IMR, IIR, IER.
30  */
31 #define IMR(offset)				XE_REG(offset + 0x4)
32 #define IIR(offset)				XE_REG(offset + 0x8)
33 #define IER(offset)				XE_REG(offset + 0xc)
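/*
 * Illustration (the base address is made up, only the layout is real): for a
 * unit whose registers start at 0x190000, the ISR is at 0x190000 itself,
 * IMR(0x190000) is 0x190004, IIR(0x190000) is 0x190008 and IER(0x190000) is
 * 0x19000c.
 */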
34 
35 static int xe_irq_msix_init(struct xe_device *xe);
36 static void xe_irq_msix_free(struct xe_device *xe);
37 static int xe_irq_msix_request_irqs(struct xe_device *xe);
38 static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
39 
40 static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
41 {
42 	u32 val = xe_mmio_read32(mmio, reg);
43 
44 	if (val == 0)
45 		return;
46 
47 	drm_WARN(&mmio->tile->xe->drm, 1,
48 		 "Interrupt register 0x%x is not zero: 0x%08x\n",
49 		 reg.addr, val);
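	/* IIR can theoretically queue up two events, so clear and flush twice. */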
50 	xe_mmio_write32(mmio, reg, 0xffffffff);
51 	xe_mmio_read32(mmio, reg);
52 	xe_mmio_write32(mmio, reg, 0xffffffff);
53 	xe_mmio_read32(mmio, reg);
54 }
55 
56 /*
57  * Unmask and enable the specified interrupts.  Does not check current state,
58  * so any bits not specified here will become masked and disabled.
59  */
60 static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
61 {
62 	struct xe_mmio *mmio = &tile->mmio;
63 
64 	/*
65 	 * If we're just enabling an interrupt now, it shouldn't already
66 	 * be raised in the IIR.
67 	 */
68 	assert_iir_is_zero(mmio, IIR(irqregs));
69 
70 	xe_mmio_write32(mmio, IER(irqregs), bits);
71 	xe_mmio_write32(mmio, IMR(irqregs), ~bits);
72 
73 	/* Posting read */
74 	xe_mmio_read32(mmio, IMR(irqregs));
75 }
76 
77 /* Mask and disable all interrupts. */
78 static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
79 {
80 	struct xe_mmio *mmio = &tile->mmio;
81 
82 	xe_mmio_write32(mmio, IMR(irqregs), ~0);
83 	/* Posting read */
84 	xe_mmio_read32(mmio, IMR(irqregs));
85 
86 	xe_mmio_write32(mmio, IER(irqregs), 0);
87 
88 	/* IIR can theoretically queue up two events. Be paranoid. */
89 	xe_mmio_write32(mmio, IIR(irqregs), ~0);
90 	xe_mmio_read32(mmio, IIR(irqregs));
91 	xe_mmio_write32(mmio, IIR(irqregs), ~0);
92 	xe_mmio_read32(mmio, IIR(irqregs));
93 }
94 
95 static u32 xelp_intr_disable(struct xe_device *xe)
96 {
97 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
98 
99 	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);
100 
101 	/*
102 	 * Now with master disabled, get a sample of level indications
103 	 * for this interrupt. Indications will be cleared on related acks.
104 	 * New indications can and will light up during processing,
105 	 * and will generate a new interrupt after the master is re-enabled.
106 	 */
107 	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
108 }
109 
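/*
 * Ack the GU MISC leaf interrupts; the returned bits (notably the GSE events
 * consumed by display) are handed to xe_display_irq_enable() once the master
 * interrupt is re-enabled.
 */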
110 static u32
111 gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
112 {
113 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
114 	u32 iir;
115 
116 	if (!(master_ctl & GU_MISC_IRQ))
117 		return 0;
118 
119 	iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
120 	if (likely(iir))
121 		xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);
122 
123 	return iir;
124 }
125 
126 static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
127 {
128 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
129 
130 	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
131 	if (stall)
132 		xe_mmio_read32(mmio, GFX_MSTR_IRQ);
133 }
134 
135 /* Enable/unmask the HWE interrupts for a specific GT's engines. */
136 void xe_irq_enable_hwe(struct xe_gt *gt)
137 {
138 	struct xe_device *xe = gt_to_xe(gt);
139 	struct xe_mmio *mmio = &gt->mmio;
140 	u32 ccs_mask, bcs_mask;
141 	u32 irqs, dmask, smask;
142 	u32 gsc_mask = 0;
143 	u32 heci_mask = 0;
144 
145 	if (xe_device_uses_memirq(xe))
146 		return;
147 
148 	if (xe_device_uc_enabled(xe)) {
149 		irqs = GT_RENDER_USER_INTERRUPT |
150 			GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
151 	} else {
152 		irqs = GT_RENDER_USER_INTERRUPT |
153 		       GT_CS_MASTER_ERROR_INTERRUPT |
154 		       GT_CONTEXT_SWITCH_INTERRUPT |
155 		       GT_WAIT_SEMAPHORE_INTERRUPT;
156 	}
157 
158 	ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
159 	bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
160 
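	/*
	 * Most of the enable/mask registers below pack two units, one per
	 * 16-bit half: dmask programs both halves, smask only the upper one.
	 */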
161 	dmask = irqs << 16 | irqs;
162 	smask = irqs << 16;
163 
164 	if (!xe_gt_is_media_type(gt)) {
165 		/* Enable interrupts for each engine class */
166 		xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
167 		if (ccs_mask)
168 			xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);
169 
170 		/* Unmask interrupts for each engine instance */
171 		xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
172 		xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
173 		if (bcs_mask & (BIT(1)|BIT(2)))
174 			xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
175 		if (bcs_mask & (BIT(3)|BIT(4)))
176 			xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
177 		if (bcs_mask & (BIT(5)|BIT(6)))
178 			xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
179 		if (bcs_mask & (BIT(7)|BIT(8)))
180 			xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
181 		if (ccs_mask & (BIT(0)|BIT(1)))
182 			xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
183 		if (ccs_mask & (BIT(2)|BIT(3)))
184 			xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
185 	}
186 
187 	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
188 		/* Enable interrupts for each engine class */
189 		xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);
190 
191 		/* Unmask interrupts for each engine instance */
192 		xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
193 		xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
194 		xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);
195 
196 		/*
197 		 * The HECI2 interrupt is enabled via the same register as the
198 		 * GSCCS interrupts, but it has its own mask register.
199 		 */
200 		if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
201 			gsc_mask = irqs | GSC_ER_COMPLETE;
202 			heci_mask = GSC_IRQ_INTF(1);
203 		} else if (xe->info.has_heci_gscfi) {
204 			gsc_mask = GSC_IRQ_INTF(1);
205 		}
206 
207 		if (gsc_mask) {
208 			xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
209 			xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
210 		}
211 		if (heci_mask)
212 			xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
213 
214 		if (xe_pxp_is_supported(xe)) {
215 			u32 kcr_mask = KCR_PXP_STATE_TERMINATED_INTERRUPT |
216 				       KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT |
217 				       KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT;
218 
219 			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, kcr_mask << 16);
220 			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~(kcr_mask << 16));
221 		}
222 	}
223 }
224 
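/*
 * Fetch the identity of the interrupt that fired: select the bit of interest
 * in the selector register, spin until the identity data is flagged valid,
 * then ack it by writing the identity value back.
 */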
225 static u32
226 gt_engine_identity(struct xe_device *xe,
227 		   struct xe_mmio *mmio,
228 		   const unsigned int bank,
229 		   const unsigned int bit)
230 {
231 	u32 timeout_ts;
232 	u32 ident;
233 
234 	lockdep_assert_held(&xe->irq.lock);
235 
236 	xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));
237 
238 	/*
239 	 * NB: Specs do not specify how long to spin wait,
240 	 * so we do ~100us as an educated guess.
241 	 */
242 	timeout_ts = (local_clock() >> 10) + 100;
243 	do {
244 		ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
245 	} while (!(ident & INTR_DATA_VALID) &&
246 		 !time_after32(local_clock() >> 10, timeout_ts));
247 
248 	if (unlikely(!(ident & INTR_DATA_VALID))) {
249 		drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
250 			bank, bit, ident);
251 		return 0;
252 	}
253 
254 	xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);
255 
256 	return ident;
257 }
258 
259 #define   OTHER_MEDIA_GUC_INSTANCE           16
260 
261 static void
262 gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
263 {
264 	if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt))
265 		return xe_guc_irq_handler(&gt->uc.guc, iir);
266 	if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
267 		return xe_guc_irq_handler(&gt->uc.guc, iir);
268 	if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
269 		return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);
270 
271 	if (instance != OTHER_GUC_INSTANCE &&
272 	    instance != OTHER_MEDIA_GUC_INSTANCE) {
273 		WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
274 			  instance, iir);
275 	}
276 }
277 
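/*
 * Route an interrupt identity to the GT that owns it: on MEDIA_VER 13+ the
 * media engines (and the media GuC/GSC instances) belong to the media GT,
 * everything else to the primary GT.
 */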
278 static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
279 				    enum xe_engine_class class,
280 				    unsigned int instance)
281 {
282 	struct xe_device *xe = tile_to_xe(tile);
283 
284 	if (MEDIA_VER(xe) < 13)
285 		return tile->primary_gt;
286 
287 	switch (class) {
288 	case XE_ENGINE_CLASS_VIDEO_DECODE:
289 	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
290 		return tile->media_gt;
291 	case XE_ENGINE_CLASS_OTHER:
292 		switch (instance) {
293 		case OTHER_MEDIA_GUC_INSTANCE:
294 		case OTHER_GSC_INSTANCE:
295 		case OTHER_GSC_HECI2_INSTANCE:
296 			return tile->media_gt;
297 		default:
298 			break;
299 		}
300 		fallthrough;
301 	default:
302 		return tile->primary_gt;
303 	}
304 }
305 
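/*
 * Walk both GT interrupt banks: latch the pending bits, fetch the identity of
 * each one while the bank is held, ack the bank, and only then dispatch to
 * the per-engine and "other" handlers.
 */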
306 static void gt_irq_handler(struct xe_tile *tile,
307 			   u32 master_ctl, unsigned long *intr_dw,
308 			   u32 *identity)
309 {
310 	struct xe_device *xe = tile_to_xe(tile);
311 	struct xe_mmio *mmio = &tile->mmio;
312 	unsigned int bank, bit;
313 	u16 instance, intr_vec;
314 	enum xe_engine_class class;
315 	struct xe_hw_engine *hwe;
316 
317 	spin_lock(&xe->irq.lock);
318 
319 	for (bank = 0; bank < 2; bank++) {
320 		if (!(master_ctl & GT_DW_IRQ(bank)))
321 			continue;
322 
323 		intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
324 		for_each_set_bit(bit, intr_dw + bank, 32)
325 			identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
326 		xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);
327 
328 		for_each_set_bit(bit, intr_dw + bank, 32) {
329 			struct xe_gt *engine_gt;
330 
331 			class = INTR_ENGINE_CLASS(identity[bit]);
332 			instance = INTR_ENGINE_INSTANCE(identity[bit]);
333 			intr_vec = INTR_ENGINE_INTR(identity[bit]);
334 
335 			engine_gt = pick_engine_gt(tile, class, instance);
336 
337 			hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
338 			if (hwe) {
339 				xe_hw_engine_handle_irq(hwe, intr_vec);
340 				continue;
341 			}
342 
343 			if (class == XE_ENGINE_CLASS_OTHER) {
344 				/*
345 				 * HECI GSCFI interrupts come from outside the GT.
346 				 * KCR irqs come from inside the GT but are handled
347 				 * by the global PXP subsystem.
348 				 */
349 				if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
350 					xe_heci_gsc_irq_handler(xe, intr_vec);
351 				else if (instance == OTHER_KCR_INSTANCE)
352 					xe_pxp_irq_handler(xe, intr_vec);
353 				else
354 					gt_other_irq_handler(engine_gt, instance, intr_vec);
355 			}
356 		}
357 	}
358 
359 	spin_unlock(&xe->irq.lock);
360 }
361 
362 /*
363  * Top-level interrupt handler for Xe_LP platforms (which do not have
364  * a "master tile" interrupt register).
365  */
366 static irqreturn_t xelp_irq_handler(int irq, void *arg)
367 {
368 	struct xe_device *xe = arg;
369 	struct xe_tile *tile = xe_device_get_root_tile(xe);
370 	u32 master_ctl, gu_misc_iir;
371 	unsigned long intr_dw[2];
372 	u32 identity[32];
373 
374 	if (!atomic_read(&xe->irq.enabled))
375 		return IRQ_NONE;
376 
377 	master_ctl = xelp_intr_disable(xe);
378 	if (!master_ctl) {
379 		xelp_intr_enable(xe, false);
380 		return IRQ_NONE;
381 	}
382 
383 	gt_irq_handler(tile, master_ctl, intr_dw, identity);
384 
385 	xe_display_irq_handler(xe, master_ctl);
386 
387 	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
388 
389 	xelp_intr_enable(xe, false);
390 
391 	xe_display_irq_enable(xe, gu_misc_iir);
392 
393 	return IRQ_HANDLED;
394 }
395 
396 static u32 dg1_intr_disable(struct xe_device *xe)
397 {
398 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
399 	u32 val;
400 
401 	/* First disable interrupts */
402 	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);
403 
404 	/* Get the indication levels and ack the master unit */
405 	val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
406 	if (unlikely(!val))
407 		return 0;
408 
409 	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);
410 
411 	return val;
412 }
413 
414 static void dg1_intr_enable(struct xe_device *xe, bool stall)
415 {
416 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
417 
418 	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
419 	if (stall)
420 		xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
421 }
422 
423 /*
424  * Top-level interrupt handler for Xe_LP+ and beyond.  These platforms have
425  * a "master tile" interrupt register which must be consulted before the
426  * "graphics master" interrupt register.
427  */
428 static irqreturn_t dg1_irq_handler(int irq, void *arg)
429 {
430 	struct xe_device *xe = arg;
431 	struct xe_tile *tile;
432 	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
433 	unsigned long intr_dw[2];
434 	u32 identity[32];
435 	u8 id;
436 
437 	/* TODO: This really shouldn't be copied+pasted */
438 
439 	if (!atomic_read(&xe->irq.enabled))
440 		return IRQ_NONE;
441 
442 	master_tile_ctl = dg1_intr_disable(xe);
443 	if (!master_tile_ctl) {
444 		dg1_intr_enable(xe, false);
445 		return IRQ_NONE;
446 	}
447 
448 	for_each_tile(tile, xe, id) {
449 		struct xe_mmio *mmio = &tile->mmio;
450 
451 		if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
452 			continue;
453 
454 		master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);
455 
456 		/*
457 		 * We might be in the irq handler just as PCIe DPC is initiated,
458 		 * in which case all MMIO reads return all 1's. Ignore this
459 		 * irq as the device is inaccessible.
460 		 */
461 		if (master_ctl == REG_GENMASK(31, 0)) {
462 			drm_dbg(&tile_to_xe(tile)->drm,
463 				"Ignore this IRQ as device might be in DPC containment.\n");
464 			return IRQ_HANDLED;
465 		}
466 
467 		xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);
468 
469 		gt_irq_handler(tile, master_ctl, intr_dw, identity);
470 
471 		/*
472 		 * Display interrupts (including display backlight operations
473 		 * that get reported as Gunit GSE) are only hooked up to
474 		 * the primary tile.
475 		 */
476 		if (id == 0) {
477 			if (xe->info.has_heci_cscfi)
478 				xe_heci_csc_irq_handler(xe, master_ctl);
479 			xe_display_irq_handler(xe, master_ctl);
480 			xe_i2c_irq_handler(xe, master_ctl);
481 			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
482 		}
483 	}
484 
485 	dg1_intr_enable(xe, false);
486 	xe_display_irq_enable(xe, gu_misc_iir);
487 
488 	return IRQ_HANDLED;
489 }
490 
491 static void gt_irq_reset(struct xe_tile *tile)
492 {
493 	struct xe_mmio *mmio = &tile->mmio;
494 
495 	u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
496 						   XE_ENGINE_CLASS_COMPUTE);
497 	u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
498 						   XE_ENGINE_CLASS_COPY);
499 
500 	/* Disable RCS, BCS, VCS and VECS class engines. */
501 	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
502 	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
503 	if (ccs_mask)
504 		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);
505 
506 	/* Restore the irq masks on RCS, BCS, VCS and VECS engines. */
507 	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK,	~0);
508 	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK,	~0);
509 	if (bcs_mask & (BIT(1)|BIT(2)))
510 		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
511 	if (bcs_mask & (BIT(3)|BIT(4)))
512 		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
513 	if (bcs_mask & (BIT(5)|BIT(6)))
514 		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
515 	if (bcs_mask & (BIT(7)|BIT(8)))
516 		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
517 	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK,	~0);
518 	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK,	~0);
519 	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK,	~0);
520 	if (ccs_mask & (BIT(0)|BIT(1)))
521 		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
522 	if (ccs_mask & (BIT(2)|BIT(3)))
523 		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);
524 
525 	if ((tile->media_gt &&
526 	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
527 	    tile_to_xe(tile)->info.has_heci_gscfi) {
528 		xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
529 		xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
530 		xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
531 		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, 0);
532 		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~0);
533 	}
534 
535 	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
536 	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK,  ~0);
537 	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE,	 0);
538 	xe_mmio_write32(mmio, GUC_SG_INTR_MASK,		~0);
539 }
540 
541 static void xelp_irq_reset(struct xe_tile *tile)
542 {
543 	xelp_intr_disable(tile_to_xe(tile));
544 
545 	gt_irq_reset(tile);
546 
547 	if (IS_SRIOV_VF(tile_to_xe(tile)))
548 		return;
549 
550 	mask_and_disable(tile, PCU_IRQ_OFFSET);
551 }
552 
553 static void dg1_irq_reset(struct xe_tile *tile)
554 {
555 	if (tile->id == 0)
556 		dg1_intr_disable(tile_to_xe(tile));
557 
558 	gt_irq_reset(tile);
559 
560 	if (IS_SRIOV_VF(tile_to_xe(tile)))
561 		return;
562 
563 	mask_and_disable(tile, PCU_IRQ_OFFSET);
564 }
565 
566 static void dg1_irq_reset_mstr(struct xe_tile *tile)
567 {
568 	struct xe_mmio *mmio = &tile->mmio;
569 
570 	xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
571 }
572 
573 static void vf_irq_reset(struct xe_device *xe)
574 {
575 	struct xe_tile *tile;
576 	unsigned int id;
577 
578 	xe_assert(xe, IS_SRIOV_VF(xe));
579 
580 	if (GRAPHICS_VERx100(xe) < 1210)
581 		xelp_intr_disable(xe);
582 	else
583 		xe_assert(xe, xe_device_has_memirq(xe));
584 
585 	for_each_tile(tile, xe, id) {
586 		if (xe_device_has_memirq(xe))
587 			xe_memirq_reset(&tile->memirq);
588 		else
589 			gt_irq_reset(tile);
590 	}
591 }
592 
593 static void xe_irq_reset(struct xe_device *xe)
594 {
595 	struct xe_tile *tile;
596 	u8 id;
597 
598 	if (IS_SRIOV_VF(xe))
599 		return vf_irq_reset(xe);
600 
601 	if (xe_device_uses_memirq(xe)) {
602 		for_each_tile(tile, xe, id)
603 			xe_memirq_reset(&tile->memirq);
604 	}
605 
606 	for_each_tile(tile, xe, id) {
607 		if (GRAPHICS_VERx100(xe) >= 1210)
608 			dg1_irq_reset(tile);
609 		else
610 			xelp_irq_reset(tile);
611 	}
612 
613 	tile = xe_device_get_root_tile(xe);
614 	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
615 	xe_display_irq_reset(xe);
616 
617 	/*
618 	 * The tile's top-level status register should be the last one
619 	 * to be reset to avoid possible bit re-latching from lower
620 	 * level interrupts.
621 	 */
622 	if (GRAPHICS_VERx100(xe) >= 1210) {
623 		for_each_tile(tile, xe, id)
624 			dg1_irq_reset_mstr(tile);
625 	}
626 }
627 
628 static void vf_irq_postinstall(struct xe_device *xe)
629 {
630 	struct xe_tile *tile;
631 	unsigned int id;
632 
633 	for_each_tile(tile, xe, id)
634 		if (xe_device_has_memirq(xe))
635 			xe_memirq_postinstall(&tile->memirq);
636 
637 	if (GRAPHICS_VERx100(xe) < 1210)
638 		xelp_intr_enable(xe, true);
639 	else
640 		xe_assert(xe, xe_device_has_memirq(xe));
641 }
642 
643 static void xe_irq_postinstall(struct xe_device *xe)
644 {
645 	if (IS_SRIOV_VF(xe))
646 		return vf_irq_postinstall(xe);
647 
648 	if (xe_device_uses_memirq(xe)) {
649 		struct xe_tile *tile;
650 		unsigned int id;
651 
652 		for_each_tile(tile, xe, id)
653 			xe_memirq_postinstall(&tile->memirq);
654 	}
655 
656 	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
657 
658 	/*
659 	 * ASLE backlight operations are reported via GUnit GSE interrupts
660 	 * on the root tile.
661 	 */
662 	unmask_and_enable(xe_device_get_root_tile(xe),
663 			  GU_MISC_IRQ_OFFSET, GU_MISC_GSE);
664 
665 	/* Enable top-level interrupts */
666 	if (GRAPHICS_VERx100(xe) >= 1210)
667 		dg1_intr_enable(xe, true);
668 	else
669 		xelp_intr_enable(xe, true);
670 }
671 
672 static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
673 {
674 	struct xe_device *xe = arg;
675 	struct xe_tile *tile;
676 	unsigned int id;
677 
678 	if (!atomic_read(&xe->irq.enabled))
679 		return IRQ_NONE;
680 
681 	for_each_tile(tile, xe, id)
682 		xe_memirq_handler(&tile->memirq);
683 
684 	return IRQ_HANDLED;
685 }
686 
687 static irq_handler_t xe_irq_handler(struct xe_device *xe)
688 {
689 	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
690 		return vf_mem_irq_handler;
691 
692 	if (GRAPHICS_VERx100(xe) >= 1210)
693 		return dg1_irq_handler;
694 	else
695 		return xelp_irq_handler;
696 }
697 
698 static int xe_irq_msi_request_irqs(struct xe_device *xe)
699 {
700 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
701 	irq_handler_t irq_handler;
702 	int irq, err;
703 
704 	irq_handler = xe_irq_handler(xe);
705 	if (!irq_handler) {
706 		drm_err(&xe->drm, "No supported interrupt handler");
707 		return -EINVAL;
708 	}
709 
710 	irq = pci_irq_vector(pdev, 0);
711 	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
712 	if (err < 0) {
713 		drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
714 		return err;
715 	}
716 
717 	return 0;
718 }
719 
720 static void xe_irq_msi_free(struct xe_device *xe)
721 {
722 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
723 	int irq;
724 
725 	irq = pci_irq_vector(pdev, 0);
726 	free_irq(irq, xe);
727 }
728 
729 static void irq_uninstall(void *arg)
730 {
731 	struct xe_device *xe = arg;
732 
733 	if (!atomic_xchg(&xe->irq.enabled, 0))
734 		return;
735 
736 	xe_irq_reset(xe);
737 
738 	if (xe_device_has_msix(xe))
739 		xe_irq_msix_free(xe);
740 	else
741 		xe_irq_msi_free(xe);
742 }
743 
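/*
 * Software-only setup: initialize the irq lock and, if the device exposes
 * MSI-X, record how many vectors are available. Vector allocation and
 * request_irq() happen later, in xe_irq_install().
 */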
744 int xe_irq_init(struct xe_device *xe)
745 {
746 	spin_lock_init(&xe->irq.lock);
747 
748 	return xe_irq_msix_init(xe);
749 }
750 
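/*
 * Allocate the MSI or MSI-X vectors, hook up the handlers and enable
 * interrupts. The matching teardown, irq_uninstall(), is registered as a
 * devm action so it runs automatically on driver removal.
 */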
751 int xe_irq_install(struct xe_device *xe)
752 {
753 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
754 	unsigned int irq_flags = PCI_IRQ_MSI;
755 	int nvec = 1;
756 	int err;
757 
758 	xe_irq_reset(xe);
759 
760 	if (xe_device_has_msix(xe)) {
761 		nvec = xe->irq.msix.nvec;
762 		irq_flags = PCI_IRQ_MSIX;
763 	}
764 
765 	err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
766 	if (err < 0) {
767 		drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
768 		return err;
769 	}
770 
771 	err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
772 					xe_irq_msi_request_irqs(xe);
773 	if (err)
774 		return err;
775 
776 	atomic_set(&xe->irq.enabled, 1);
777 
778 	xe_irq_postinstall(xe);
779 
780 	return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
781 }
782 
783 static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
784 {
785 	synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
786 }
787 
788 void xe_irq_suspend(struct xe_device *xe)
789 {
790 	atomic_set(&xe->irq.enabled, 0); /* no new irqs */
791 
792 	/* flush irqs */
793 	if (xe_device_has_msix(xe))
794 		xe_irq_msix_synchronize_irq(xe);
795 	else
796 		xe_irq_msi_synchronize_irq(xe);
797 	xe_irq_reset(xe); /* turn irqs off */
798 }
799 
800 void xe_irq_resume(struct xe_device *xe)
801 {
802 	struct xe_gt *gt;
803 	int id;
804 
805 	/*
806 	 * lock not needed:
807 	 * 1. no irq will arrive before the postinstall
808 	 * 2. display is not yet resumed
809 	 */
810 	atomic_set(&xe->irq.enabled, 1);
811 	xe_irq_reset(xe);
812 	xe_irq_postinstall(xe); /* turn irqs on */
813 
814 	for_each_gt(gt, xe, id)
815 		xe_irq_enable_hwe(gt);
816 }
817 
818 /* MSI-X related definitions and functions below. */
819 
820 enum xe_irq_msix_static {
821 	GUC2HOST_MSIX = 0,
822 	DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
823 	/* Must be last */
824 	NUM_OF_STATIC_MSIX,
825 };
826 
827 static int xe_irq_msix_init(struct xe_device *xe)
828 {
829 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
830 	int nvec = pci_msix_vec_count(pdev);
831 
832 	if (nvec == -EINVAL)
833 		return 0;  /* MSI */
834 
835 	if (nvec < 0) {
836 		drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
837 		return nvec;
838 	}
839 
840 	xe->irq.msix.nvec = nvec;
841 	xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
842 	return 0;
843 }
844 
845 static irqreturn_t guc2host_irq_handler(int irq, void *arg)
846 {
847 	struct xe_device *xe = arg;
848 	struct xe_tile *tile;
849 	u8 id;
850 
851 	if (!atomic_read(&xe->irq.enabled))
852 		return IRQ_NONE;
853 
854 	for_each_tile(tile, xe, id)
855 		xe_guc_irq_handler(&tile->primary_gt->uc.guc,
856 				   GUC_INTR_GUC2HOST);
857 
858 	return IRQ_HANDLED;
859 }
860 
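/*
 * Default MSI-X vector: engine interrupts are delivered through the
 * memory-based irq status pages, so walk every hardware engine of every GT
 * on every tile and let xe_memirq_hwe_handler() sort out which ones fired.
 */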
861 static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
862 {
863 	unsigned int tile_id, gt_id;
864 	struct xe_device *xe = arg;
865 	struct xe_memirq *memirq;
866 	struct xe_hw_engine *hwe;
867 	enum xe_hw_engine_id id;
868 	struct xe_tile *tile;
869 	struct xe_gt *gt;
870 
871 	if (!atomic_read(&xe->irq.enabled))
872 		return IRQ_NONE;
873 
874 	for_each_tile(tile, xe, tile_id) {
875 		memirq = &tile->memirq;
876 		if (!memirq->bo)
877 			continue;
878 
879 		for_each_gt(gt, xe, gt_id) {
880 			if (gt->tile != tile)
881 				continue;
882 
883 			for_each_hw_engine(hwe, gt, id)
884 				xe_memirq_hwe_handler(memirq, hwe);
885 		}
886 	}
887 
888 	return IRQ_HANDLED;
889 }
890 
891 static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
892 				    bool dynamic_msix, u16 *msix)
893 {
894 	struct xa_limit limit;
895 	int ret;
896 	u32 id;
897 
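	/*
	 * A dynamic request may land on any free index above the static
	 * vectors; a static request must get exactly the index the caller
	 * passed in, or fail if it is already taken.
	 */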
898 	limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
899 				 XA_LIMIT(*msix, *msix);
900 	ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
901 	if (ret)
902 		return ret;
903 
904 	if (dynamic_msix)
905 		*msix = id;
906 
907 	return 0;
908 }
909 
910 static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
911 {
912 	xa_erase(&xe->irq.msix.indexes, msix);
913 }
914 
915 static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
916 					    void *irq_buf, const char *name, u16 msix)
917 {
918 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
919 	int ret, irq;
920 
921 	irq = pci_irq_vector(pdev, msix);
922 	if (irq < 0)
923 		return irq;
924 
925 	ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
926 	if (ret < 0)
927 		return ret;
928 
929 	return 0;
930 }
931 
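/*
 * Rough usage sketch (illustrative only; "my_handler", "my_data" and the
 * vector name are hypothetical, not part of this driver):
 *
 *	u16 msix;
 *	int err;
 *
 *	err = xe_irq_msix_request_irq(xe, my_handler, my_data,
 *				      DRIVER_NAME "-myvec", true, &msix);
 *	if (err)
 *		return err;
 *	...
 *	xe_irq_msix_free_irq(xe, msix);
 *
 * With dynamic_msix == false the caller pre-selects the index instead, as
 * xe_irq_msix_request_irqs() does for the static vectors.
 */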
932 int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
933 			    const char *name, bool dynamic_msix, u16 *msix)
934 {
935 	int ret;
936 
937 	ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
938 	if (ret)
939 		return ret;
940 
941 	ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
942 	if (ret) {
943 		drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
944 		xe_irq_msix_release_vector(xe, *msix);
945 		return ret;
946 	}
947 
948 	return 0;
949 }
950 
951 void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
952 {
953 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
954 	int irq;
955 	void *irq_buf;
956 
957 	irq_buf = xa_load(&xe->irq.msix.indexes, msix);
958 	if (!irq_buf)
959 		return;
960 
961 	irq = pci_irq_vector(pdev, msix);
962 	if (irq < 0) {
963 		drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
964 		return;
965 	}
966 
967 	free_irq(irq, irq_buf);
968 	xe_irq_msix_release_vector(xe, msix);
969 }
970 
971 int xe_irq_msix_request_irqs(struct xe_device *xe)
972 {
973 	int err;
974 	u16 msix;
975 
976 	msix = GUC2HOST_MSIX;
977 	err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
978 				      DRIVER_NAME "-guc2host", false, &msix);
979 	if (err)
980 		return err;
981 
982 	msix = DEFAULT_MSIX;
983 	err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
984 				      DRIVER_NAME "-default-msix", false, &msix);
985 	if (err) {
986 		xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
987 		return err;
988 	}
989 
990 	return 0;
991 }
992 
993 void xe_irq_msix_free(struct xe_device *xe)
994 {
995 	unsigned long msix;
996 	u32 *dummy;
997 
998 	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
999 		xe_irq_msix_free_irq(xe, msix);
1000 	xa_destroy(&xe->irq.msix.indexes);
1001 }
1002 
1003 void xe_irq_msix_synchronize_irq(struct xe_device *xe)
1004 {
1005 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
1006 	unsigned long msix;
1007 	u32 *dummy;
1008 
1009 	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
1010 		synchronize_irq(pci_irq_vector(pdev, msix));
1011 }
1012