xref: /linux/drivers/gpu/drm/xe/xe_irq.c (revision df02351331671abb26788bc13f6d276e26ae068f)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_irq.h"

#include <linux/sched/clock.h>

#include <drm/drm_managed.h>

#include "display/xe_display.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_pxp.h"
#include "xe_sriov.h"

/*
 * Interrupt registers for a unit are always consecutive and ordered
 * ISR, IMR, IIR, IER.
 */
#define IMR(offset)				XE_REG(offset + 0x4)
#define IIR(offset)				XE_REG(offset + 0x8)
#define IER(offset)				XE_REG(offset + 0xc)

static int xe_irq_msix_init(struct xe_device *xe);
static void xe_irq_msix_free(struct xe_device *xe);
static int xe_irq_msix_request_irqs(struct xe_device *xe);
static void xe_irq_msix_synchronize_irq(struct xe_device *xe);

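/*
 * Warn if an interrupt is already pending in the IIR and then try to clear
 * it.  As noted in mask_and_disable(), the IIR can latch a second event, so
 * the write/read sequence is done twice.
 */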
static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 val = xe_mmio_read32(mmio, reg);

	if (val == 0)
		return;

	drm_WARN(&mmio->tile->xe->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 reg.addr, val);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
}

/*
 * Unmask and enable the specified interrupts.  Does not check current state,
 * so any bits not specified here will become masked and disabled.
 */
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
	struct xe_mmio *mmio = &tile->mmio;

	/*
	 * If we're just enabling an interrupt now, it shouldn't already
	 * be raised in the IIR.
	 */
	assert_iir_is_zero(mmio, IIR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), bits);
	xe_mmio_write32(mmio, IMR(irqregs), ~bits);

	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));
}

/* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, IMR(irqregs), ~0);
	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
}

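/*
 * Disable the top-level ("graphics master") interrupt and return a snapshot
 * of the pending master IRQ bits for the caller to process.
 */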
static u32 xelp_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

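/*
 * Ack a pending GU MISC interrupt by reading and clearing its IIR.  The
 * returned value is handed to the display code once the master interrupt
 * has been re-enabled.
 */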
static u32
gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 iir;

	if (!(master_ctl & GU_MISC_IRQ))
		return 0;

	iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
	if (likely(iir))
		xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);

	return iir;
}

static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
	if (stall)
		xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

/* Enable/unmask the HWE interrupts for a specific GT's engines. */
void xe_irq_enable_hwe(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_mmio *mmio = &gt->mmio;
	u32 ccs_mask, bcs_mask;
	u32 irqs, dmask, smask;
	u32 gsc_mask = 0;
	u32 heci_mask = 0;

	if (xe_device_uses_memirq(xe))
		return;

	if (xe_device_uc_enabled(xe)) {
		irqs = GT_RENDER_USER_INTERRUPT |
			GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		irqs = GT_RENDER_USER_INTERRUPT |
		       GT_CS_MASTER_ERROR_INTERRUPT |
		       GT_CONTEXT_SWITCH_INTERRUPT |
		       GT_WAIT_SEMAPHORE_INTERRUPT;
	}

	ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
	bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);

	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	if (!xe_gt_is_media_type(gt)) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
		if (ccs_mask)
			xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
		xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
		if (bcs_mask & (BIT(1)|BIT(2)))
			xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(3)|BIT(4)))
			xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(5)|BIT(6)))
			xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(7)|BIT(8)))
			xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(0)|BIT(1)))
			xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(2)|BIT(3)))
			xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
	}

	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
		xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
		xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);

		/*
		 * the heci2 interrupt is enabled via the same register as the
		 * GSCCS interrupts, but it has its own mask register.
		 */
		if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
			gsc_mask = irqs | GSC_ER_COMPLETE;
			heci_mask = GSC_IRQ_INTF(1);
		} else if (xe->info.has_heci_gscfi) {
			gsc_mask = GSC_IRQ_INTF(1);
		}

		if (gsc_mask) {
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
		}
		if (heci_mask)
			xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));

		if (xe_pxp_is_supported(xe)) {
			u32 kcr_mask = KCR_PXP_STATE_TERMINATED_INTERRUPT |
				       KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT |
				       KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT;

			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, kcr_mask << 16);
			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~(kcr_mask << 16));
		}
	}
}

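/*
 * Select the given bank/bit in the IIR selector, spin-wait for the latched
 * identity (engine class, instance and interrupt vector) to become valid,
 * then ack it.  Returns 0 if the identity never becomes valid in time.
 */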
static u32
gt_engine_identity(struct xe_device *xe,
		   struct xe_mmio *mmio,
		   const unsigned int bank,
		   const unsigned int bit)
{
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&xe->irq.lock);

	xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
	} while (!(ident & INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & INTR_DATA_VALID))) {
		drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			bank, bit, ident);
		return 0;
	}

	xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);

	return ident;
}

#define   OTHER_MEDIA_GUC_INSTANCE           16

static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
		return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);

	if (instance != OTHER_GUC_INSTANCE &&
	    instance != OTHER_MEDIA_GUC_INSTANCE) {
		WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
			  instance, iir);
	}
}

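/*
 * Route an interrupt identity to the GT that owns it: on platforms with a
 * standalone media GT (media IP version 13+), video engines and the media
 * GuC/GSC instances belong to the media GT, everything else to the primary GT.
 */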
static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
				    enum xe_engine_class class,
				    unsigned int instance)
{
	struct xe_device *xe = tile_to_xe(tile);

	if (MEDIA_VER(xe) < 13)
		return tile->primary_gt;

	switch (class) {
	case XE_ENGINE_CLASS_VIDEO_DECODE:
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return tile->media_gt;
	case XE_ENGINE_CLASS_OTHER:
		switch (instance) {
		case OTHER_MEDIA_GUC_INSTANCE:
		case OTHER_GSC_INSTANCE:
		case OTHER_GSC_HECI2_INSTANCE:
			return tile->media_gt;
		default:
			break;
		}
		fallthrough;
	default:
		return tile->primary_gt;
	}
}

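/*
 * Process the GT-level interrupts for one tile: latch the identity for every
 * bit set in each interrupt DW bank, ack the bank, then dispatch each event
 * either to its hw engine or to the GuC/GSC/PXP "other" handlers.
 */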
static void gt_irq_handler(struct xe_tile *tile,
			   u32 master_ctl, unsigned long *intr_dw,
			   u32 *identity)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mmio *mmio = &tile->mmio;
	unsigned int bank, bit;
	u16 instance, intr_vec;
	enum xe_engine_class class;
	struct xe_hw_engine *hwe;

	spin_lock(&xe->irq.lock);

	for (bank = 0; bank < 2; bank++) {
		if (!(master_ctl & GT_DW_IRQ(bank)))
			continue;

		intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
		for_each_set_bit(bit, intr_dw + bank, 32)
			identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
		xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);

		for_each_set_bit(bit, intr_dw + bank, 32) {
			struct xe_gt *engine_gt;

			class = INTR_ENGINE_CLASS(identity[bit]);
			instance = INTR_ENGINE_INSTANCE(identity[bit]);
			intr_vec = INTR_ENGINE_INTR(identity[bit]);

			engine_gt = pick_engine_gt(tile, class, instance);

			hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
			if (hwe) {
				xe_hw_engine_handle_irq(hwe, intr_vec);
				continue;
			}

			if (class == XE_ENGINE_CLASS_OTHER) {
				/*
				 * HECI GSCFI interrupts come from outside of GT.
				 * KCR irqs come from inside GT but are handled
				 * by the global PXP subsystem.
				 */
				if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
					xe_heci_gsc_irq_handler(xe, intr_vec);
				else if (instance == OTHER_KCR_INSTANCE)
					xe_pxp_irq_handler(xe, intr_vec);
				else
					gt_other_irq_handler(engine_gt, instance, intr_vec);
			}
		}
	}

	spin_unlock(&xe->irq.lock);
}

/*
 * Top-level interrupt handler for Xe_LP platforms (which did not have
 * a "master tile" interrupt register).
 */
static irqreturn_t xelp_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	u32 master_ctl, gu_misc_iir;
	unsigned long intr_dw[2];
	u32 identity[32];

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_ctl = xelp_intr_disable(xe);
	if (!master_ctl) {
		xelp_intr_enable(xe, false);
		return IRQ_NONE;
	}

	gt_irq_handler(tile, master_ctl, intr_dw, identity);

	xe_display_irq_handler(xe, master_ctl);

	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);

	xelp_intr_enable(xe, false);

	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

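/*
 * Disable the "master tile" interrupt, then read and ack the per-tile
 * indication bits so the caller knows which tiles need servicing.
 */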
static u32 dg1_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 val;

	/* First disable interrupts */
	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);

	return val;
}

static void dg1_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
	if (stall)
		xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
}

/*
 * Top-level interrupt handler for Xe_LP+ and beyond.  These platforms have
 * a "master tile" interrupt register which must be consulted before the
 * "graphics master" interrupt register.
 */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
	unsigned long intr_dw[2];
	u32 identity[32];
	u8 id;

	/* TODO: This really shouldn't be copied+pasted */

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_tile_ctl = dg1_intr_disable(xe);
	if (!master_tile_ctl) {
		dg1_intr_enable(xe, false);
		return IRQ_NONE;
	}

	for_each_tile(tile, xe, id) {
		struct xe_mmio *mmio = &tile->mmio;

		if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
			continue;

		master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);

		/*
		 * We might be in the irq handler just as PCIe DPC is
		 * initiated, in which case all MMIO reads return all 1's.
		 * Ignore this irq as the device is inaccessible.
		 */
		if (master_ctl == REG_GENMASK(31, 0)) {
			drm_dbg(&tile_to_xe(tile)->drm,
				"Ignore this IRQ as device might be in DPC containment.\n");
			return IRQ_HANDLED;
		}

		xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);

		gt_irq_handler(tile, master_ctl, intr_dw, identity);

		/*
		 * Display interrupts (including display backlight operations
		 * that get reported as Gunit GSE) would only be hooked up to
		 * the primary tile.
		 */
		if (id == 0) {
			if (xe->info.has_heci_cscfi)
				xe_heci_csc_irq_handler(xe, master_ctl);
			xe_display_irq_handler(xe, master_ctl);
			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
		}
	}

	dg1_intr_enable(xe, false);
	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

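/*
 * Disable and mask all GT engine, GSC/HECI, crypto (KCR), GPM and GuC
 * interrupts for a tile, in preparation for (re)initialization.
 */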
static void gt_irq_reset(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;

	u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COMPUTE);
	u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COPY);

	/* Disable RCS, BCS, VCS and VECS class engines. */
	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
	if (ccs_mask)
		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);

	/* Restore irq masks on RCS, BCS, VCS and VECS engines. */
	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK,	~0);
	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK,	~0);
	if (bcs_mask & (BIT(1)|BIT(2)))
		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
	if (bcs_mask & (BIT(3)|BIT(4)))
		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
	if (bcs_mask & (BIT(5)|BIT(6)))
		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
	if (bcs_mask & (BIT(7)|BIT(8)))
		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK,	~0);
	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK,	~0);
	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK,	~0);
	if (ccs_mask & (BIT(0)|BIT(1)))
		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(2)|BIT(3)))
		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);

	if ((tile->media_gt &&
	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
	    tile_to_xe(tile)->info.has_heci_gscfi) {
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
		xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~0);
	}

	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK,  ~0);
	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE,	 0);
	xe_mmio_write32(mmio, GUC_SG_INTR_MASK,		~0);
}

static void xelp_irq_reset(struct xe_tile *tile)
{
	xelp_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset(struct xe_tile *tile)
{
	if (tile->id == 0)
		dg1_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset_mstr(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}

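/*
 * SR-IOV VFs either rely on memory-based interrupts or, on older platforms,
 * on the legacy GT interrupt registers; reset whichever mechanism is in use.
 */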
static void vf_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	xe_assert(xe, IS_SRIOV_VF(xe));

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_disable(xe);
	else
		xe_assert(xe, xe_device_has_memirq(xe));

	for_each_tile(tile, xe, id) {
		if (xe_device_has_memirq(xe))
			xe_memirq_reset(&tile->memirq);
		else
			gt_irq_reset(tile);
	}
}

static void xe_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	u8 id;

	if (IS_SRIOV_VF(xe))
		return vf_irq_reset(xe);

	if (xe_device_uses_memirq(xe)) {
		for_each_tile(tile, xe, id)
			xe_memirq_reset(&tile->memirq);
	}

	for_each_tile(tile, xe, id) {
		if (GRAPHICS_VERx100(xe) >= 1210)
			dg1_irq_reset(tile);
		else
			xelp_irq_reset(tile);
	}

	tile = xe_device_get_root_tile(xe);
	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
	xe_display_irq_reset(xe);

	/*
	 * The tile's top-level status register should be the last one
	 * to be reset to avoid possible bit re-latching from lower
	 * level interrupts.
	 */
	if (GRAPHICS_VERx100(xe) >= 1210) {
		for_each_tile(tile, xe, id)
			dg1_irq_reset_mstr(tile);
	}
}

static void vf_irq_postinstall(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	for_each_tile(tile, xe, id)
		if (xe_device_has_memirq(xe))
			xe_memirq_postinstall(&tile->memirq);

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_enable(xe, true);
	else
		xe_assert(xe, xe_device_has_memirq(xe));
}

static void xe_irq_postinstall(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe))
		return vf_irq_postinstall(xe);

	if (xe_device_uses_memirq(xe)) {
		struct xe_tile *tile;
		unsigned int id;

		for_each_tile(tile, xe, id)
			xe_memirq_postinstall(&tile->memirq);
	}

	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));

	/*
	 * ASLE backlight operations are reported via GUnit GSE interrupts
	 * on the root tile.
	 */
	unmask_and_enable(xe_device_get_root_tile(xe),
			  GU_MISC_IRQ_OFFSET, GU_MISC_GSE);

	/* Enable top-level interrupts */
	if (GRAPHICS_VERx100(xe) >= 1210)
		dg1_intr_enable(xe, true);
	else
		xelp_intr_enable(xe, true);
}

static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	unsigned int id;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, id)
		xe_memirq_handler(&tile->memirq);

	return IRQ_HANDLED;
}

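/*
 * Pick the top-level IRQ handler: memory-based for VFs that support it,
 * otherwise the DG1-style master-tile flow on graphics IP 12.10+ and the
 * Xe_LP flow on everything older.
 */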
static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
		return vf_mem_irq_handler;

	if (GRAPHICS_VERx100(xe) >= 1210)
		return dg1_irq_handler;
	else
		return xelp_irq_handler;
}

static int xe_irq_msi_request_irqs(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	irq_handler_t irq_handler;
	int irq, err;

	irq_handler = xe_irq_handler(xe);
	if (!irq_handler) {
		drm_err(&xe->drm, "No supported interrupt handler");
		return -EINVAL;
	}

	irq = pci_irq_vector(pdev, 0);
	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
		return err;
	}

	return 0;
}

static void xe_irq_msi_free(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;

	irq = pci_irq_vector(pdev, 0);
	free_irq(irq, xe);
}

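/*
 * Managed (devm) teardown action: disable and reset interrupts and release
 * the MSI or MSI-X resources.  Registered by xe_irq_install().
 */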
static void irq_uninstall(void *arg)
{
	struct xe_device *xe = arg;

	if (!atomic_xchg(&xe->irq.enabled, 0))
		return;

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe))
		xe_irq_msix_free(xe);
	else
		xe_irq_msi_free(xe);
}

int xe_irq_init(struct xe_device *xe)
{
	spin_lock_init(&xe->irq.lock);

	return xe_irq_msix_init(xe);
}

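/*
 * Allocate the MSI or MSI-X vectors, request the interrupt handler(s),
 * enable interrupt handling and register the managed teardown action.
 */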
int xe_irq_install(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int irq_flags = PCI_IRQ_MSI;
	int nvec = 1;
	int err;

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe)) {
		nvec = xe->irq.msix.nvec;
		irq_flags = PCI_IRQ_MSIX;
	}

	err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
		return err;
	}

	err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
					xe_irq_msi_request_irqs(xe);
	if (err)
		return err;

	atomic_set(&xe->irq.enabled, 1);

	xe_irq_postinstall(xe);

	return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
}

static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
{
	synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
}

void xe_irq_suspend(struct xe_device *xe)
{
	atomic_set(&xe->irq.enabled, 0); /* no new irqs */

	/* flush irqs */
	if (xe_device_has_msix(xe))
		xe_irq_msix_synchronize_irq(xe);
	else
		xe_irq_msi_synchronize_irq(xe);
	xe_irq_reset(xe); /* turn irqs off */
}

void xe_irq_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	/*
	 * lock not needed:
	 * 1. no irq will arrive before the postinstall
	 * 2. display is not yet resumed
	 */
	atomic_set(&xe->irq.enabled, 1);
	xe_irq_reset(xe);
	xe_irq_postinstall(xe); /* turn irqs on */

	for_each_gt(gt, xe, id)
		xe_irq_enable_hwe(gt);
}

/* MSI-X related definitions and functions below. */

enum xe_irq_msix_static {
	GUC2HOST_MSIX = 0,
	DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
	/* Must be last */
	NUM_OF_STATIC_MSIX,
};

static int xe_irq_msix_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int nvec = pci_msix_vec_count(pdev);

	if (nvec == -EINVAL)
		return 0;  /* MSI */

	if (nvec < 0) {
		drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
		return nvec;
	}

	xe->irq.msix.nvec = nvec;
	xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
	return 0;
}

static irqreturn_t guc2host_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u8 id;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, id)
		xe_guc_irq_handler(&tile->primary_gt->uc.guc,
				   GUC_INTR_GUC2HOST);

	return IRQ_HANDLED;
}

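/*
 * Handler for the default MSI-X vector: walk every hw engine on every tile
 * that has memory-based interrupt status set up and process its reports.
 */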
static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
{
	unsigned int tile_id, gt_id;
	struct xe_device *xe = arg;
	struct xe_memirq *memirq;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_tile *tile;
	struct xe_gt *gt;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, tile_id) {
		memirq = &tile->memirq;
		if (!memirq->bo)
			continue;

		for_each_gt(gt, xe, gt_id) {
			if (gt->tile != tile)
				continue;

			for_each_hw_engine(hwe, gt, id)
				xe_memirq_hwe_handler(memirq, hwe);
		}
	}

	return IRQ_HANDLED;
}

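/*
 * Reserve an MSI-X index in the xarray: either the exact static index passed
 * in *msix, or a dynamically allocated one above the static range.
 */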
static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
				    bool dynamic_msix, u16 *msix)
{
	struct xa_limit limit;
	int ret;
	u32 id;

	limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
				 XA_LIMIT(*msix, *msix);
	ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
	if (ret)
		return ret;

	if (dynamic_msix)
		*msix = id;

	return 0;
}

static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
{
	xa_erase(&xe->irq.msix.indexes, msix);
}

static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
					    void *irq_buf, const char *name, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int ret, irq;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
	if (ret < 0)
		return ret;

	return 0;
}

int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
			    const char *name, bool dynamic_msix, u16 *msix)
{
	int ret;

	ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
	if (ret)
		return ret;

	ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
	if (ret) {
		drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
		xe_irq_msix_release_vector(xe, *msix);
		return ret;
	}

	return 0;
}

void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;
	void *irq_buf;

	irq_buf = xa_load(&xe->irq.msix.indexes, msix);
	if (!irq_buf)
		return;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0) {
		drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
		return;
	}

	free_irq(irq, irq_buf);
	xe_irq_msix_release_vector(xe, msix);
}

int xe_irq_msix_request_irqs(struct xe_device *xe)
{
	int err;
	u16 msix;

	msix = GUC2HOST_MSIX;
	err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
				      DRIVER_NAME "-guc2host", false, &msix);
	if (err)
		return err;

	msix = DEFAULT_MSIX;
	err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
				      DRIVER_NAME "-default-msix", false, &msix);
	if (err) {
		xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
		return err;
	}

	return 0;
}

void xe_irq_msix_free(struct xe_device *xe)
{
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		xe_irq_msix_free_irq(xe, msix);
	xa_destroy(&xe->irq.msix.indexes);
}

void xe_irq_msix_synchronize_irq(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		synchronize_irq(pci_irq_vector(pdev, msix));
}