// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_irq.h"

#include <linux/sched/clock.h>

#include <drm/drm_managed.h>

#include "display/xe_display.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_sriov.h"

/*
 * Interrupt registers for a unit are always consecutive and ordered
 * ISR, IMR, IIR, IER.
 */
#define IMR(offset)	XE_REG(offset + 0x4)
#define IIR(offset)	XE_REG(offset + 0x8)
#define IER(offset)	XE_REG(offset + 0xc)

static int xe_irq_msix_init(struct xe_device *xe);
static void xe_irq_msix_free(struct xe_device *xe);
static int xe_irq_msix_request_irqs(struct xe_device *xe);
static void xe_irq_msix_synchronize_irq(struct xe_device *xe);

static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 val = xe_mmio_read32(mmio, reg);

	if (val == 0)
		return;

	drm_WARN(&mmio->tile->xe->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 reg.addr, val);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
}

/*
 * Unmask and enable the specified interrupts. Does not check current state,
 * so any bits not specified here will become masked and disabled.
 */
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
	struct xe_mmio *mmio = &tile->mmio;

	/*
	 * If we're just enabling an interrupt now, it shouldn't already
	 * be raised in the IIR.
	 */
	assert_iir_is_zero(mmio, IIR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), bits);
	xe_mmio_write32(mmio, IMR(irqregs), ~bits);

	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));
}

/* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, IMR(irqregs), ~0);
	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
}

static u32 xelp_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

static u32
gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 iir;

	if (!(master_ctl & GU_MISC_IRQ))
		return 0;

	iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
	if (likely(iir))
		xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);

	return iir;
}

static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
	if (stall)
		xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

/* Enable/unmask the HWE interrupts for a specific GT's engines. */
void xe_irq_enable_hwe(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_mmio *mmio = &gt->mmio;
	u32 ccs_mask, bcs_mask;
	u32 irqs, dmask, smask;
	u32 gsc_mask = 0;
	u32 heci_mask = 0;

	if (xe_device_uses_memirq(xe))
		return;

	if (xe_device_uc_enabled(xe)) {
		irqs = GT_RENDER_USER_INTERRUPT |
		       GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		irqs = GT_RENDER_USER_INTERRUPT |
		       GT_CS_MASTER_ERROR_INTERRUPT |
		       GT_CONTEXT_SWITCH_INTERRUPT |
		       GT_WAIT_SEMAPHORE_INTERRUPT;
	}

	ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
	bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);

	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	if (!xe_gt_is_media_type(gt)) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
		if (ccs_mask)
			xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
		xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
		if (bcs_mask & (BIT(1)|BIT(2)))
			xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(3)|BIT(4)))
			xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(5)|BIT(6)))
			xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(7)|BIT(8)))
			xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(0)|BIT(1)))
			xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(2)|BIT(3)))
			xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
	}

	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
		xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
		xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);

		/*
		 * The HECI2 interrupt is enabled via the same register as the
		 * GSCCS interrupts, but it has its own mask register.
		 */
		if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
			gsc_mask = irqs | GSC_ER_COMPLETE;
			heci_mask = GSC_IRQ_INTF(1);
		} else if (xe->info.has_heci_gscfi) {
			gsc_mask = GSC_IRQ_INTF(1);
		}

		if (gsc_mask) {
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
			xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
		}
		if (heci_mask)
			xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
	}
}

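/*
 * Read back the identity of the interrupt source for a given bank/bit:
 * select the bit via IIR_REG_SELECTOR, spin (roughly 100us) until
 * INTR_DATA_VALID shows up in INTR_IDENTITY_REG, then ack the identity
 * register by writing the value back.
 */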
static u32
gt_engine_identity(struct xe_device *xe,
		   struct xe_mmio *mmio,
		   const unsigned int bank,
		   const unsigned int bit)
{
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&xe->irq.lock);

	xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
	} while (!(ident & INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & INTR_DATA_VALID))) {
		drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			bank, bit, ident);
		return 0;
	}

	xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);

	return ident;
}

#define OTHER_MEDIA_GUC_INSTANCE	16

static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
		return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);

	if (instance != OTHER_GUC_INSTANCE &&
	    instance != OTHER_MEDIA_GUC_INSTANCE) {
		WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
			  instance, iir);
	}
}

static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
				    enum xe_engine_class class,
				    unsigned int instance)
{
	struct xe_device *xe = tile_to_xe(tile);

	if (MEDIA_VER(xe) < 13)
		return tile->primary_gt;

	switch (class) {
	case XE_ENGINE_CLASS_VIDEO_DECODE:
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return tile->media_gt;
	case XE_ENGINE_CLASS_OTHER:
		switch (instance) {
		case OTHER_MEDIA_GUC_INSTANCE:
		case OTHER_GSC_INSTANCE:
		case OTHER_GSC_HECI2_INSTANCE:
			return tile->media_gt;
		default:
			break;
		}
		fallthrough;
	default:
		return tile->primary_gt;
	}
}

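/*
 * Service the GT interrupts for one tile: latch the engine identities for
 * every bit set in each GT_INTR_DW bank, ack the bank, then dispatch each
 * identity either to the matching hardware engine or to the GuC/GSC/HECI
 * "other" handlers on the GT chosen by pick_engine_gt().
 */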
static void gt_irq_handler(struct xe_tile *tile,
			   u32 master_ctl, unsigned long *intr_dw,
			   u32 *identity)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mmio *mmio = &tile->mmio;
	unsigned int bank, bit;
	u16 instance, intr_vec;
	enum xe_engine_class class;
	struct xe_hw_engine *hwe;

	spin_lock(&xe->irq.lock);

	for (bank = 0; bank < 2; bank++) {
		if (!(master_ctl & GT_DW_IRQ(bank)))
			continue;

		intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
		for_each_set_bit(bit, intr_dw + bank, 32)
			identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
		xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);

		for_each_set_bit(bit, intr_dw + bank, 32) {
			struct xe_gt *engine_gt;

			class = INTR_ENGINE_CLASS(identity[bit]);
			instance = INTR_ENGINE_INSTANCE(identity[bit]);
			intr_vec = INTR_ENGINE_INTR(identity[bit]);

			engine_gt = pick_engine_gt(tile, class, instance);

			hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
			if (hwe) {
				xe_hw_engine_handle_irq(hwe, intr_vec);
				continue;
			}

			if (class == XE_ENGINE_CLASS_OTHER) {
				/* HECI GSCFI interrupts come from outside of GT */
				if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
					xe_heci_gsc_irq_handler(xe, intr_vec);
				else
					gt_other_irq_handler(engine_gt, instance, intr_vec);
			}
		}
	}

	spin_unlock(&xe->irq.lock);
}

/*
 * Top-level interrupt handler for Xe_LP platforms (which did not have
 * a "master tile" interrupt register).
 */
static irqreturn_t xelp_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	u32 master_ctl, gu_misc_iir;
	unsigned long intr_dw[2];
	u32 identity[32];

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_ctl = xelp_intr_disable(xe);
	if (!master_ctl) {
		xelp_intr_enable(xe, false);
		return IRQ_NONE;
	}

	gt_irq_handler(tile, master_ctl, intr_dw, identity);

	xe_display_irq_handler(xe, master_ctl);

	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);

	xelp_intr_enable(xe, false);

	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

static u32 dg1_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	u32 val;

	/* First disable interrupts */
	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);

	return val;
}

static void dg1_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
	if (stall)
		xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
}

/*
 * Top-level interrupt handler for Xe_LP+ and beyond. These platforms have
 * a "master tile" interrupt register which must be consulted before the
 * "graphics master" interrupt register.
 */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
	unsigned long intr_dw[2];
	u32 identity[32];
	u8 id;

	/* TODO: This really shouldn't be copied+pasted */

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_tile_ctl = dg1_intr_disable(xe);
	if (!master_tile_ctl) {
		dg1_intr_enable(xe, false);
		return IRQ_NONE;
	}

	for_each_tile(tile, xe, id) {
		struct xe_mmio *mmio = &tile->mmio;

		if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
			continue;

		master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);

		/*
		 * We might be in the irq handler just as PCIe DPC is initiated,
		 * in which case all MMIO reads return all 1's. Ignore this irq
		 * as the device is inaccessible.
		 */
		if (master_ctl == REG_GENMASK(31, 0)) {
			drm_dbg(&tile_to_xe(tile)->drm,
				"Ignore this IRQ as device might be in DPC containment.\n");
			return IRQ_HANDLED;
		}

		xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);

		gt_irq_handler(tile, master_ctl, intr_dw, identity);

		/*
		 * Display interrupts (including display backlight operations
		 * that get reported as Gunit GSE) would only be hooked up to
		 * the primary tile.
		 */
		if (id == 0) {
			if (xe->info.has_heci_cscfi)
				xe_heci_csc_irq_handler(xe, master_ctl);
			xe_display_irq_handler(xe, master_ctl);
			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
		}
	}

	dg1_intr_enable(xe, false);
	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

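/*
 * Disable and mask every GT engine interrupt on this tile, including the
 * GSC/HECI, GPM/WGBOXPERF and GuC SG units.
 */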
static void gt_irq_reset(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;

	u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COMPUTE);
	u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COPY);

	/* Disable RCS, BCS, VCS and VECS class engines. */
	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
	if (ccs_mask)
		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);

	/* Restore the masks (mask all interrupts) on RCS, BCS, VCS and VECS engines. */
	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~0);
	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~0);
	if (bcs_mask & (BIT(1)|BIT(2)))
		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
	if (bcs_mask & (BIT(3)|BIT(4)))
		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
	if (bcs_mask & (BIT(5)|BIT(6)))
		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
	if (bcs_mask & (BIT(7)|BIT(8)))
		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(0)|BIT(1)))
		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(2)|BIT(3)))
		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);

	if ((tile->media_gt &&
	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
	    tile_to_xe(tile)->info.has_heci_gscfi) {
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
		xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
	}

	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK, ~0);
	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GUC_SG_INTR_MASK, ~0);
}

static void xelp_irq_reset(struct xe_tile *tile)
{
	xelp_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset(struct xe_tile *tile)
{
	if (tile->id == 0)
		dg1_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset_mstr(struct xe_tile *tile)
{
	struct xe_mmio *mmio = &tile->mmio;

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}

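/*
 * SR-IOV VF variant of the IRQ reset: pre-12.10 platforms still use the
 * Xe_LP master-disable path, newer VFs are expected to rely on memory-based
 * interrupts (see the xe_assert below); each tile then resets either its
 * memirq state or its GT interrupt registers.
 */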
static void vf_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	xe_assert(xe, IS_SRIOV_VF(xe));

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_disable(xe);
	else
		xe_assert(xe, xe_device_has_memirq(xe));

	for_each_tile(tile, xe, id) {
		if (xe_device_has_memirq(xe))
			xe_memirq_reset(&tile->memirq);
		else
			gt_irq_reset(tile);
	}
}

static void xe_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	u8 id;

	if (IS_SRIOV_VF(xe))
		return vf_irq_reset(xe);

	if (xe_device_uses_memirq(xe)) {
		for_each_tile(tile, xe, id)
			xe_memirq_reset(&tile->memirq);
	}

	for_each_tile(tile, xe, id) {
		if (GRAPHICS_VERx100(xe) >= 1210)
			dg1_irq_reset(tile);
		else
			xelp_irq_reset(tile);
	}

	tile = xe_device_get_root_tile(xe);
	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
	xe_display_irq_reset(xe);

	/*
	 * The tile's top-level status register should be the last one
	 * to be reset to avoid possible bit re-latching from lower
	 * level interrupts.
	 */
	if (GRAPHICS_VERx100(xe) >= 1210) {
		for_each_tile(tile, xe, id)
			dg1_irq_reset_mstr(tile);
	}
}

static void vf_irq_postinstall(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	for_each_tile(tile, xe, id)
		if (xe_device_has_memirq(xe))
			xe_memirq_postinstall(&tile->memirq);

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_enable(xe, true);
	else
		xe_assert(xe, xe_device_has_memirq(xe));
}

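/*
 * Re-arm interrupts after a reset: set up memirq pages where used, let the
 * display code install its interrupts, unmask the GU_MISC (GSE) interrupt
 * on the root tile and finally re-enable the top-level master interrupt.
 */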
static void xe_irq_postinstall(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe))
		return vf_irq_postinstall(xe);

	if (xe_device_uses_memirq(xe)) {
		struct xe_tile *tile;
		unsigned int id;

		for_each_tile(tile, xe, id)
			xe_memirq_postinstall(&tile->memirq);
	}

	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));

	/*
	 * ASLE backlight operations are reported via GUnit GSE interrupts
	 * on the root tile.
	 */
	unmask_and_enable(xe_device_get_root_tile(xe),
			  GU_MISC_IRQ_OFFSET, GU_MISC_GSE);

	/* Enable top-level interrupts */
	if (GRAPHICS_VERx100(xe) >= 1210)
		dg1_intr_enable(xe, true);
	else
		xelp_intr_enable(xe, true);
}

static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	unsigned int id;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, id)
		xe_memirq_handler(&tile->memirq);

	return IRQ_HANDLED;
}

static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
		return vf_mem_irq_handler;

	if (GRAPHICS_VERx100(xe) >= 1210)
		return dg1_irq_handler;
	else
		return xelp_irq_handler;
}

static int xe_irq_msi_request_irqs(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	irq_handler_t irq_handler;
	int irq, err;

	irq_handler = xe_irq_handler(xe);
	if (!irq_handler) {
		drm_err(&xe->drm, "No supported interrupt handler");
		return -EINVAL;
	}

	irq = pci_irq_vector(pdev, 0);
	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
		return err;
	}

	return 0;
}

static void xe_irq_msi_free(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;

	irq = pci_irq_vector(pdev, 0);
	free_irq(irq, xe);
}

static void irq_uninstall(void *arg)
{
	struct xe_device *xe = arg;

	if (!atomic_xchg(&xe->irq.enabled, 0))
		return;

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe))
		xe_irq_msix_free(xe);
	else
		xe_irq_msi_free(xe);
}

int xe_irq_init(struct xe_device *xe)
{
	spin_lock_init(&xe->irq.lock);

	return xe_irq_msix_init(xe);
}

int xe_irq_install(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int irq_flags = PCI_IRQ_MSI;
	int nvec = 1;
	int err;

	xe_irq_reset(xe);

	if (xe_device_has_msix(xe)) {
		nvec = xe->irq.msix.nvec;
		irq_flags = PCI_IRQ_MSIX;
	}

	err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
		return err;
	}

	err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
		xe_irq_msi_request_irqs(xe);
	if (err)
		return err;

	atomic_set(&xe->irq.enabled, 1);

	xe_irq_postinstall(xe);

	return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
}

static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
{
	synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
}

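/*
 * Suspend/resume helpers: xe_irq_suspend() stops new interrupts, flushes any
 * handler still running and masks everything; xe_irq_resume() reverses that
 * and re-enables the per-engine interrupts on every GT.
 */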
void xe_irq_suspend(struct xe_device *xe)
{
	atomic_set(&xe->irq.enabled, 0); /* no new irqs */

	/* flush irqs */
	if (xe_device_has_msix(xe))
		xe_irq_msix_synchronize_irq(xe);
	else
		xe_irq_msi_synchronize_irq(xe);
	xe_irq_reset(xe); /* turn irqs off */
}

void xe_irq_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	/*
	 * lock not needed:
	 * 1. no irq will arrive before the postinstall
	 * 2. display is not yet resumed
	 */
	atomic_set(&xe->irq.enabled, 1);
	xe_irq_reset(xe);
	xe_irq_postinstall(xe); /* turn irqs on */

	for_each_gt(gt, xe, id)
		xe_irq_enable_hwe(gt);
}

/* MSI-X related definitions and functions below. */

enum xe_irq_msix_static {
	GUC2HOST_MSIX = 0,
	DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
	/* Must be last */
	NUM_OF_STATIC_MSIX,
};

static int xe_irq_msix_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int nvec = pci_msix_vec_count(pdev);

	if (nvec == -EINVAL)
		return 0; /* MSI */

	if (nvec < 0) {
		drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
		return nvec;
	}

	xe->irq.msix.nvec = nvec;
	xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
	return 0;
}

static irqreturn_t guc2host_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u8 id;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, id)
		xe_guc_irq_handler(&tile->primary_gt->uc.guc,
				   GUC_INTR_GUC2HOST);

	return IRQ_HANDLED;
}

static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
{
	unsigned int tile_id, gt_id;
	struct xe_device *xe = arg;
	struct xe_memirq *memirq;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_tile *tile;
	struct xe_gt *gt;

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	for_each_tile(tile, xe, tile_id) {
		memirq = &tile->memirq;
		if (!memirq->bo)
			continue;

		for_each_gt(gt, xe, gt_id) {
			if (gt->tile != tile)
				continue;

			for_each_hw_engine(hwe, gt, id)
				xe_memirq_hwe_handler(memirq, hwe);
		}
	}

	return IRQ_HANDLED;
}

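/*
 * MSI-X vectors are tracked in the xe->irq.msix.indexes xarray: static
 * vectors (GUC2HOST, default) are inserted at their fixed index, while
 * dynamic ones are allocated anywhere above NUM_OF_STATIC_MSIX.
 */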
static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
				    bool dynamic_msix, u16 *msix)
{
	struct xa_limit limit;
	int ret;
	u32 id;

	limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
				 XA_LIMIT(*msix, *msix);
	ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
	if (ret)
		return ret;

	if (dynamic_msix)
		*msix = id;

	return 0;
}

static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
{
	xa_erase(&xe->irq.msix.indexes, msix);
}

static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
					    void *irq_buf, const char *name, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int ret, irq;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
	if (ret < 0)
		return ret;

	return 0;
}

int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
			    const char *name, bool dynamic_msix, u16 *msix)
{
	int ret;

	ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
	if (ret)
		return ret;

	ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
	if (ret) {
		drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
		xe_irq_msix_release_vector(xe, *msix);
		return ret;
	}

	return 0;
}

void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;
	void *irq_buf;

	irq_buf = xa_load(&xe->irq.msix.indexes, msix);
	if (!irq_buf)
		return;

	irq = pci_irq_vector(pdev, msix);
	if (irq < 0) {
		drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
		return;
	}

	free_irq(irq, irq_buf);
	xe_irq_msix_release_vector(xe, msix);
}

static int xe_irq_msix_request_irqs(struct xe_device *xe)
{
	int err;
	u16 msix;

	msix = GUC2HOST_MSIX;
	err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
				      DRIVER_NAME "-guc2host", false, &msix);
	if (err)
		return err;

	msix = DEFAULT_MSIX;
	err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
				      DRIVER_NAME "-default-msix", false, &msix);
	if (err) {
		xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
		return err;
	}

	return 0;
}

static void xe_irq_msix_free(struct xe_device *xe)
{
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		xe_irq_msix_free_irq(xe, msix);
	xa_destroy(&xe->irq.msix.indexes);
}

static void xe_irq_msix_synchronize_irq(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned long msix;
	u32 *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		synchronize_irq(pci_irq_vector(pdev, msix));
}