// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_irq.h"

#include <linux/sched/clock.h>

#include <drm/drm_managed.h>

#include "display/xe_display.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_hw_error.h"
#include "xe_i2c.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_pxp.h"
#include "xe_sriov.h"
#include "xe_tile.h"

/*
 * Interrupt registers for a unit are always consecutive and ordered
 * ISR, IMR, IIR, IER.
 */
#define IMR(offset)	XE_REG(offset + 0x4)
#define IIR(offset)	XE_REG(offset + 0x8)
#define IER(offset)	XE_REG(offset + 0xc)

static int xe_irq_msix_init(struct xe_device *xe);
static void xe_irq_msix_free(struct xe_device *xe);
static int xe_irq_msix_request_irqs(struct xe_device *xe);
static void xe_irq_msix_synchronize_irq(struct xe_device *xe);

static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 val = xe_mmio_read32(mmio, reg);

	if (val == 0)
		return;

	drm_WARN(&mmio->tile->xe->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 reg.addr, val);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
}

/*
 * Unmask and enable the specified interrupts. Does not check current state,
 * so any bits not specified here will become masked and disabled.
 */
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
	struct xe_mmio *mmio = &tile->mmio;

	/*
	 * If we're just enabling an interrupt now, it shouldn't already
	 * be raised in the IIR.
	 */
	assert_iir_is_zero(mmio, IIR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), bits);
	xe_mmio_write32(mmio, IMR(irqregs), ~bits);

	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));
}

/* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, IMR(irqregs), ~0);
	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
}

static u32 xelp_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

static u32
gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 iir;

	if (!(master_ctl & GU_MISC_IRQ))
		return 0;

	iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
	if (likely(iir))
		xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);

	return iir;
}

static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
	if (stall)
		xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

/* Enable/unmask the HWE interrupts for a specific GT's engines. */
void xe_irq_enable_hwe(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_mmio *mmio = &gt->mmio;
	u32 ccs_mask, bcs_mask;
	u32 irqs, dmask, smask;
	u32 gsc_mask = 0;
	u32 heci_mask = 0;

	if (xe_device_uses_memirq(xe))
		return;

	if (xe_device_uc_enabled(xe)) {
		irqs = GT_RENDER_USER_INTERRUPT |
			GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		irqs = GT_RENDER_USER_INTERRUPT |
			GT_CS_MASTER_ERROR_INTERRUPT |
			GT_CONTEXT_SWITCH_INTERRUPT |
			GT_WAIT_SEMAPHORE_INTERRUPT;
	}

	ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
	bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);

	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	if (xe_gt_is_main_type(gt)) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
		if (ccs_mask)
			xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
		xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
		if (bcs_mask & (BIT(1)|BIT(2)))
			xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(3)|BIT(4)))
			xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(5)|BIT(6)))
			xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(7)|BIT(8)))
			xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(0)|BIT(1)))
			xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(2)|BIT(3)))
			xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
	}

	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
		xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
		xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);

		/*
		 * the heci2 interrupt is enabled via the same register as the
		 * GSCCS interrupts, but it has its own mask register.
		 */
		if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
			gsc_mask = irqs | GSC_ER_COMPLETE;
			heci_mask = GSC_IRQ_INTF(1);
		} else if (xe->info.has_heci_gscfi) {
			gsc_mask = GSC_IRQ_INTF(1);
		}

		if (gsc_mask) {
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
		}
		if (heci_mask)
			xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));

		if (xe_pxp_is_supported(xe)) {
			u32 kcr_mask = KCR_PXP_STATE_TERMINATED_INTERRUPT |
				       KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT |
				       KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT;

			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, kcr_mask << 16);
			xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~(kcr_mask << 16));
		}
	}
}

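/*
 * Select the given bank/bit in the IIR selector register, spin briefly until
 * the identity register reports valid data, then ack the identity. Returns
 * the raw identity dword, or 0 if it never became valid.
 */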
static u32
gt_engine_identity(struct xe_device *xe,
		   struct xe_mmio *mmio,
		   const unsigned int bank,
		   const unsigned int bit)
{
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&xe->irq.lock);

	xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
	} while (!(ident & INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & INTR_DATA_VALID))) {
		drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			bank, bit, ident);
		return 0;
	}

	xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);

	return ident;
}

#define OTHER_MEDIA_GUC_INSTANCE	16

static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE && xe_gt_is_main_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
		return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);

	if (instance != OTHER_GUC_INSTANCE &&
	    instance != OTHER_MEDIA_GUC_INSTANCE) {
		WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
			  instance, iir);
	}
}

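/*
 * Map an interrupt's engine class/instance to the GT that owns the engine.
 * Before media version 13 everything lives on the primary GT; afterwards the
 * video decode/enhance engines and the media GuC/GSC instances are routed to
 * the media GT.
 */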
static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
				    enum xe_engine_class class,
				    unsigned int instance)
{
	struct xe_device *xe = tile_to_xe(tile);

	if (MEDIA_VER(xe) < 13)
		return tile->primary_gt;

	switch (class) {
	case XE_ENGINE_CLASS_VIDEO_DECODE:
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return tile->media_gt;
	case XE_ENGINE_CLASS_OTHER:
		switch (instance) {
		case OTHER_MEDIA_GUC_INSTANCE:
		case OTHER_GSC_INSTANCE:
		case OTHER_GSC_HECI2_INSTANCE:
			return tile->media_gt;
		default:
			break;
		}
		fallthrough;
	default:
		return tile->primary_gt;
	}
}

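/*
 * Walk both GT interrupt banks: first latch an identity for every pending
 * bit, then ack the bank and dispatch each identity either to the owning
 * hardware engine or, for "OTHER" class interrupts, to the GSCFI/PXP/GuC/GSC
 * handlers.
 */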
static void gt_irq_handler(struct xe_tile *tile,
			   u32 master_ctl, unsigned long *intr_dw,
			   u32 *identity)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mmio *mmio = &tile->mmio;
	unsigned int bank, bit;
	u16 instance, intr_vec;
	enum xe_engine_class class;
	struct xe_hw_engine *hwe;

	spin_lock(&xe->irq.lock);

	for (bank = 0; bank < 2; bank++) {
		if (!(master_ctl & GT_DW_IRQ(bank)))
			continue;

		intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
		for_each_set_bit(bit, intr_dw + bank, 32)
			identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
		xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);

		for_each_set_bit(bit, intr_dw + bank, 32) {
			struct xe_gt *engine_gt;

			class = INTR_ENGINE_CLASS(identity[bit]);
			instance = INTR_ENGINE_INSTANCE(identity[bit]);
			intr_vec = INTR_ENGINE_INTR(identity[bit]);

			engine_gt = pick_engine_gt(tile, class, instance);

			hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
			if (hwe) {
				xe_hw_engine_handle_irq(hwe, intr_vec);
				continue;
			}

			if (class == XE_ENGINE_CLASS_OTHER) {
				/*
				 * HECI GSCFI interrupts come from outside of GT.
				 * KCR irqs come from inside GT but are handled
				 * by the global PXP subsystem.
				 */
				if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
					xe_heci_gsc_irq_handler(xe, intr_vec);
				else if (instance == OTHER_KCR_INSTANCE)
					xe_pxp_irq_handler(xe, intr_vec);
				else
					gt_other_irq_handler(engine_gt, instance, intr_vec);
			}
		}
	}

	spin_unlock(&xe->irq.lock);
}

/*
 * Top-level interrupt handler for Xe_LP platforms (which did not have
 * a "master tile" interrupt register).
 */
static irqreturn_t xelp_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	u32 master_ctl, gu_misc_iir;
	unsigned long intr_dw[2];
	u32 identity[32];

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_ctl = xelp_intr_disable(xe);
	if (!master_ctl) {
		xelp_intr_enable(xe, false);
		return IRQ_NONE;
	}

	gt_irq_handler(tile, master_ctl, intr_dw, identity);

	xe_display_irq_handler(xe, master_ctl);

	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);

	xelp_intr_enable(xe, false);

	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

static u32 dg1_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 val;

	/* First disable interrupts */
	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);

	return val;
}

static void dg1_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
	if (stall)
		xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
}

/*
 * Top-level interrupt handler for Xe_LP+ and beyond. These platforms have
 * a "master tile" interrupt register which must be consulted before the
 * "graphics master" interrupt register.
 */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
	unsigned long intr_dw[2];
	u32 identity[32];
	u8 id;

	/* TODO: This really shouldn't be copied+pasted */

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_tile_ctl = dg1_intr_disable(xe);
	if (!master_tile_ctl) {
		dg1_intr_enable(xe, false);
		return IRQ_NONE;
	}

	for_each_tile(tile, xe, id) {
		struct xe_mmio *mmio = &tile->mmio;

		if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
			continue;

		master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);

		/*
		 * We might be in the irq handler just as PCIe DPC is initiated,
		 * in which case all MMIO reads return all 1's. Ignore this irq
		 * as the device is inaccessible.
		 */
		if (master_ctl == REG_GENMASK(31, 0)) {
			drm_dbg(&tile_to_xe(tile)->drm,
				"Ignore this IRQ as device might be in DPC containment.\n");
			return IRQ_HANDLED;
		}

		xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);

		gt_irq_handler(tile, master_ctl, intr_dw, identity);
		xe_hw_error_irq_handler(tile, master_ctl);

		/*
		 * Display interrupts (including display backlight operations
		 * that get reported as Gunit GSE) would only be hooked up to
		 * the primary tile.
		 */
		if (id == 0) {
			if (xe->info.has_heci_cscfi)
				xe_heci_csc_irq_handler(xe, master_ctl);
			xe_display_irq_handler(xe, master_ctl);
			xe_i2c_irq_handler(xe, master_ctl);
			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
		}
	}

	dg1_intr_enable(xe, false);
	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

static void gt_irq_reset(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;

	u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COMPUTE);
	u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COPY);

	/* Disable RCS, BCS, VCS and VECS class engines. */
	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
	if (ccs_mask)
		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);

	/* Restore IRQ masks on RCS, BCS, VCS and VECS engines. */
	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~0);
	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~0);
	if (bcs_mask & (BIT(1)|BIT(2)))
		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
	if (bcs_mask & (BIT(3)|BIT(4)))
		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
	if (bcs_mask & (BIT(5)|BIT(6)))
		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
	if (bcs_mask & (BIT(7)|BIT(8)))
		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(0)|BIT(1)))
		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(2)|BIT(3)))
		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);

	if ((tile->media_gt &&
	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
	    tile_to_xe(tile)->info.has_heci_gscfi) {
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
		xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~0);
	}

	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK, ~0);
	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GUC_SG_INTR_MASK, ~0);
}

static void xelp_irq_reset(struct xe_tile *tile)
{
	xelp_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset(struct xe_tile *tile)
{
	if (xe_tile_is_root(tile))
		dg1_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset_mstr(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}

static void vf_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	xe_assert(xe, IS_SRIOV_VF(xe));

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_disable(xe);
	else
		xe_assert(xe, xe_device_has_memirq(xe));

	for_each_tile(tile, xe, id) {
		if (xe_device_has_memirq(xe))
			xe_memirq_reset(&tile->memirq);
		else
			gt_irq_reset(tile);
	}
}

static void xe_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	u8 id;

	if (IS_SRIOV_VF(xe))
		return vf_irq_reset(xe);

	if (xe_device_uses_memirq(xe)) {
		for_each_tile(tile, xe, id)
			xe_memirq_reset(&tile->memirq);
	}

	for_each_tile(tile, xe, id) {
		if (GRAPHICS_VERx100(xe) >= 1210)
			dg1_irq_reset(tile);
		else
			xelp_irq_reset(tile);
	}

	tile = xe_device_get_root_tile(xe);
	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
	xe_display_irq_reset(xe);

	/*
	 * The tile's top-level status register should be the last one
	 * to be reset to avoid possible bit re-latching from lower
	 * level interrupts.
	 */
	if (GRAPHICS_VERx100(xe) >= 1210) {
		for_each_tile(tile, xe, id)
			dg1_irq_reset_mstr(tile);
	}
}

static void vf_irq_postinstall(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	for_each_tile(tile, xe, id)
		if (xe_device_has_memirq(xe))
			xe_memirq_postinstall(&tile->memirq);

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_enable(xe, true);
	else
		xe_assert(xe, xe_device_has_memirq(xe));
}

static void xe_irq_postinstall(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe))
		return vf_irq_postinstall(xe);

	if (xe_device_uses_memirq(xe)) {
		struct xe_tile *tile;
		unsigned int id;

		for_each_tile(tile, xe, id)
			xe_memirq_postinstall(&tile->memirq);
	}

	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));

	/*
	 * ASLE backlight operations are reported via GUnit GSE interrupts
	 * on the root tile.
	 */
	unmask_and_enable(xe_device_get_root_tile(xe),
			  GU_MISC_IRQ_OFFSET, GU_MISC_GSE);

	/* Enable top-level interrupts */
	if (GRAPHICS_VERx100(xe) >= 1210)
		dg1_intr_enable(xe, true);
	else
		xelp_intr_enable(xe, true);
}

static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	unsigned int id;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, id)
		xe_memirq_handler(&tile->memirq);

	return IRQ_HANDLED;
}

static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
		return vf_mem_irq_handler;

	if (GRAPHICS_VERx100(xe) >= 1210)
		return dg1_irq_handler;
	else
		return xelp_irq_handler;
}

static int xe_irq_msi_request_irqs(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	irq_handler_t irq_handler;
	int irq, err;

	irq_handler = xe_irq_handler(xe);
	if (!irq_handler) {
		drm_err(&xe->drm, "No supported interrupt handler");
		return -EINVAL;
	}

	irq = pci_irq_vector(pdev, 0);
	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
		return err;
	}

	return 0;
}

static void xe_irq_msi_free(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;

	irq = pci_irq_vector(pdev, 0);
	free_irq(irq, xe);
}

static void irq_uninstall(void *arg)
{
	struct xe_device *xe = arg;

	if (!atomic_xchg(&xe->irq.enabled, 0))
		return;

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe))
		xe_irq_msix_free(xe);
	else
		xe_irq_msi_free(xe);
}

int xe_irq_init(struct xe_device *xe)
{
	spin_lock_init(&xe->irq.lock);

	return xe_irq_msix_init(xe);
}

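/**
 * xe_irq_install - allocate IRQ vectors and install the top-level handlers
 * @xe: xe device instance
 *
 * Resets the hardware interrupt state, allocates MSI or MSI-X vectors,
 * requests the corresponding IRQs and runs the postinstall sequence. The
 * matching teardown is registered as a devm action.
 *
 * Return: 0 on success, negative error code on failure.
 */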
int xe_irq_install(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int irq_flags = PCI_IRQ_MSI;
	int nvec = 1;
	int err;

	xe_hw_error_init(xe);

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe)) {
		nvec = xe->irq.msix.nvec;
		irq_flags = PCI_IRQ_MSIX;
	}

	err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
		return err;
	}

	err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
		xe_irq_msi_request_irqs(xe);
	if (err)
		return err;

	atomic_set(&xe->irq.enabled, 1);

	xe_irq_postinstall(xe);

	return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
}

static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
{
	synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
}

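/**
 * xe_irq_suspend - disable and quiesce interrupts for suspend
 * @xe: xe device instance
 *
 * Marks interrupts as disabled, waits for any handler still running to
 * finish, then resets the hardware interrupt state so nothing fires while
 * the device is suspended.
 */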
void xe_irq_suspend(struct xe_device *xe)
{
	atomic_set(&xe->irq.enabled, 0); /* no new irqs */

	/* flush irqs */
	if (xe_device_has_msix(xe))
		xe_irq_msix_synchronize_irq(xe);
	else
		xe_irq_msi_synchronize_irq(xe);
	xe_irq_reset(xe); /* turn irqs off */
}

void xe_irq_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	/*
	 * lock not needed:
	 * 1. no irq will arrive before the postinstall
	 * 2. display is not yet resumed
	 */
	atomic_set(&xe->irq.enabled, 1);
	xe_irq_reset(xe);
	xe_irq_postinstall(xe); /* turn irqs on */

	for_each_gt(gt, xe, id)
		xe_irq_enable_hwe(gt);
}

/* MSI-X related definitions and functions below. */

enum xe_irq_msix_static {
	GUC2HOST_MSIX = 0,
	DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
	/* Must be last */
	NUM_OF_STATIC_MSIX,
};

static int xe_irq_msix_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int nvec = pci_msix_vec_count(pdev);

	if (nvec == -EINVAL)
		return 0;  /* MSI */

	if (nvec < 0) {
		drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
		return nvec;
	}

	xe->irq.msix.nvec = nvec;
	xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
	return 0;
}

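/*
 * Default MSI-X handler for hardware engine interrupts: walk every tile that
 * has a memory-based IRQ page and service each HW engine on the GTs that
 * belong to that tile.
 */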
static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
{
	unsigned int tile_id, gt_id;
	struct xe_device *xe = arg;
	struct xe_memirq *memirq;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_tile *tile;
	struct xe_gt *gt;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, tile_id) {
		memirq = &tile->memirq;
		if (!memirq->bo)
			continue;

		for_each_gt(gt, xe, gt_id) {
			if (gt->tile != tile)
				continue;

			for_each_hw_engine(hwe, gt, id)
				xe_memirq_hwe_handler(memirq, hwe);
		}
	}

	return IRQ_HANDLED;
}

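/*
 * MSI-X vector indexes are tracked in an xarray: static vectors claim their
 * fixed index, while dynamic ones are allocated any free index above the
 * statically reserved range.
 */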
static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
				    bool dynamic_msix, u16 *msix)
{
	struct xa_limit limit;
	int ret;
	u32 id;

	limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
				 XA_LIMIT(*msix, *msix);
	ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
	if (ret)
		return ret;

	if (dynamic_msix)
		*msix = id;

	return 0;
}

static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
{
	xa_erase(&xe->irq.msix.indexes, msix);
}

static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
					    void *irq_buf, const char *name, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int ret, irq;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
	if (ret < 0)
		return ret;

	return 0;
}

int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
			    const char *name, bool dynamic_msix, u16 *msix)
{
	int ret;

	ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
	if (ret)
		return ret;

	ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
	if (ret) {
		drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
		xe_irq_msix_release_vector(xe, *msix);
		return ret;
	}

	return 0;
}

void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;
	void *irq_buf;

	irq_buf = xa_load(&xe->irq.msix.indexes, msix);
	if (!irq_buf)
		return;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0) {
		drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
		return;
	}

	free_irq(irq, irq_buf);
	xe_irq_msix_release_vector(xe, msix);
}

int xe_irq_msix_request_irqs(struct xe_device *xe)
{
	int err;
	u16 msix;

	msix = GUC2HOST_MSIX;
	err = xe_irq_msix_request_irq(xe, xe_irq_handler(xe), xe,
				      DRIVER_NAME "-guc2host", false, &msix);
	if (err)
		return err;

	msix = DEFAULT_MSIX;
	err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
				      DRIVER_NAME "-default-msix", false, &msix);
	if (err) {
		xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
		return err;
	}

	return 0;
}

void xe_irq_msix_free(struct xe_device *xe)
{
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		xe_irq_msix_free_irq(xe, msix);
	xa_destroy(&xe->irq.msix.indexes);
}

void xe_irq_msix_synchronize_irq(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		synchronize_irq(pci_irq_vector(pdev, msix));
}