1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 29 /** 30 * DOC: Interrupt Handling 31 * 32 * Interrupts generated within GPU hardware raise interrupt requests that are 33 * passed to amdgpu IRQ handler which is responsible for detecting source and 34 * type of the interrupt and dispatching matching handlers. If handling an 35 * interrupt requires calling kernel functions that may sleep processing is 36 * dispatched to work handlers. 37 * 38 * If MSI functionality is not disabled by module parameter then MSI 39 * support will be enabled. 40 * 41 * For GPU interrupt sources that may be driven by another driver, IRQ domain 42 * support is used (with mapping between virtual and hardware IRQs). 
 */

#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

/* NOTE(review): not referenced in this file — presumably a timeout in ms
 * used by other amdgpu code; confirm against users before removing.
 */
#define AMDGPU_WAIT_IDLE_TIMEOUT 200

/*
 * Human-readable names for the SOC15 IH client ids, indexed by client id.
 * Several ids are shared between IP blocks depending on ASIC, hence the
 * "X or Y" entries. Exported for use by other amdgpu code.
 */
const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};

/*
 * Mapping from logical node ids (AID*/XCD* entries) to physical instance
 * numbers. AIDs and XCDs are numbered independently in the physical space.
 */
const int node_id_to_phys_map[NODEID_MAX] = {
	[AID0_NODEID] = 0,
	[XCD0_NODEID] = 0,
	[XCD1_NODEID] = 1,
	[AID1_NODEID] = 1,
	[XCD2_NODEID] = 2,
	[XCD3_NODEID] = 3,
	[AID2_NODEID] = 2,
	[XCD4_NODEID] = 4,
	[XCD5_NODEID] = 5,
	[AID3_NODEID] = 3,
	[XCD6_NODEID] = 6,
	[XCD7_NODEID] = 7,
};

/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disable all types of interrupts from all sources.
 */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned int i, j, k;
	int r;

	/* Hold irq.lock so we don't race amdgpu_irq_update() on the same sources */
	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			/* Skip empty slots and sources that can't be toggled */
			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				/* Log and keep going; disable as much as possible */
				if (r)
					dev_err(adev->dev,
						"error disabling interrupt (%d)\n",
						r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @irq: IRQ number (unused)
 * @arg: pointer to DRM device
 *
 * IRQ handler for amdgpu driver (all ASICs).
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */
static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = drm_to_adev(dev);
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev, &adev->irq.ih);
	/* Real work happened: keep runtime PM from suspending the device */
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);

	/* Give RAS a chance to react to a fatal error on every interrupt */
	amdgpu_ras_interrupt_fatal_error_handler(adev);

	return ret;
}

/**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing IH ring 1.
 */
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih1_work);

	amdgpu_ih_process(adev, &adev->irq.ih1);
}

/**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing IH ring 2.
 */
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih2_work);

	amdgpu_ih_process(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing IH soft ring.
 */
static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih_soft_work);

	amdgpu_ih_process(adev, &adev->irq.ih_soft);
}

/**
 * amdgpu_msi_ok - check whether MSI functionality is enabled
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been disabled via module parameter
 * (all ASICs).
 *
 * Returns:
 * *true* if MSIs are allowed to be enabled or *false* otherwise
 */
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	/* amdgpu_msi module param: 1 = force on, 0 = force off,
	 * any other value = auto (defaults to enabled).
	 */
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}

/*
 * amdgpu_restore_msix - re-enable MSI-X after a function level reset
 *
 * Toggles the MSI-X enable bit off and back on in PCI config space to
 * restore MSI-X operation (no-op if MSI-X was not enabled).
 *
 * NOTE(review): assumes adev->pdev->msix_cap is non-zero; if the device
 * has no MSI-X capability this reads/writes offset PCI_MSIX_FLAGS in
 * config space — confirm callers only reach this with MSI-X present.
 */
void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* VF FLR */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}

/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up work functions for hotplug and reset interrupts, enables MSI
 * functionality, initializes vblank, hotplug and reset interrupt handling.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	unsigned int irq, flags;
	int r;

	spin_lock_init(&adev->irq.lock);

	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (!amdgpu_msi_ok(adev))
		flags = PCI_IRQ_INTX;
	else
		flags = PCI_IRQ_ALL_TYPES;

	/* we only need one vector */
	r = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
	if (r < 0) {
		dev_err(adev->dev, "Failed to alloc msi vectors\n");
		return r;
	}

	if (amdgpu_msi_ok(adev)) {
		adev->irq.msi_enabled = true;
		dev_dbg(adev->dev, "using MSI/MSI-X.\n");
	}

	/* Work items used to bounce IH ring processing out of hardirq context */
	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	/* Use vector 0 for MSI-X. */
	r = pci_irq_vector(adev->pdev, 0);
	if (r < 0)
		goto free_vectors;
	irq = r;

	/* PCI devices require shared interrupts. */
	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
			adev_to_drm(adev));
	if (r)
		goto free_vectors;

	adev->irq.installed = true;
	adev->irq.irq = irq;
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	dev_dbg(adev->dev, "amdgpu: irq initialized.\n");
	return 0;

free_vectors:
	/* NOTE(review): on the INTX path msi_enabled is false, so vectors
	 * allocated by pci_alloc_irq_vectors() are not freed here — confirm
	 * this asymmetry is intentional.
	 */
	if (adev->irq.msi_enabled)
		pci_free_irq_vectors(adev->pdev);

	adev->irq.msi_enabled = false;
	return r;
}

/**
 * amdgpu_irq_fini_hw - tear down the hardware side of interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the installed Linux IRQ and IRQ vectors, then finalizes all IH
 * rings (soft ring, main ring, IH1 and IH2).
 */
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
	if (adev->irq.installed) {
		free_irq(adev->irq.irq, adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);
	}

	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_fini_sw - shut down interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Tears down work functions for hotplug and reset interrupts, disables MSI
 * functionality, shuts down vblank, hotplug and reset interrupt handling,
 * turns off interrupts from all sources (all ASICs).
 */
void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
	unsigned int i, j;

	/* Free every registered source's per-type refcount array, then the
	 * per-client source table itself.
	 */
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}

/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers IRQ source on a client.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned int client_id, unsigned int src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	/* Lazily allocate the per-client source table on first registration */
	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	/* Refuse double registration of the same (client, src) slot */
	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	/* One atomic enable-refcount per interrupt type of this source */
	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}

/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Dispatches IRQ to IP blocks.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	/* rptr is in bytes; the ring is indexed in 32-bit dwords */
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned int client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];

	/*
	 * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
	 * si and tonga), so initialize timestamp and timestamp_src to 0
	 */
	entry.timestamp = 0;
	entry.timestamp_src = 0;

	amdgpu_ih_decode_iv(adev, &entry);

	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id);

	} else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) ||
		    (client_id == SOC15_IH_CLIENTID_ISP)) &&
		   adev->irq.virq[src_id]) {
		/* A virtual IRQ mapping exists: route through the IRQ domain
		 * so another driver's handler runs.
		 */
		generic_handle_domain_irq(adev->irq.domain, src_id);

	} else if (!adev->irq.client[client_id].sources) {
		dev_dbg(adev->dev,
			"Unregistered interrupt client_id: %d src_id: %d\n",
			client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			dev_err(adev->dev, "error processing interrupt (%d)\n",
				r);
		else if (r)
			handled = true;

	} else {
		dev_dbg(adev->dev,
			"Unregistered interrupt src_id: %d of client_id:%d\n",
			src_id, client_id);
	}

	/* Send it to amdkfd as well if it isn't already handled */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);

	if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
		ih->processed_timestamp = entry.timestamp;
}

/**
 *
 * amdgpu_irq_delegate - delegate IV to soft IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: IV entry
 * @num_dw: size of IV
 *
 * Delegate the IV to the soft IH ring and schedule processing of it. Used
 * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
 */
void amdgpu_irq_delegate(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry,
			 unsigned int num_dw)
{
	/* Copy the raw IV into the software ring and let the worker pick it up */
	amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw);
	schedule_work(&adev->irq.ih_soft_work);
}

/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Updates interrupt state for the specific source (all ASICs).
 *
 * Returns:
 * the value returned by the source's set() callback
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned int type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine after taking the lock, otherwise
	 * we might disable just enabled interrupts again
	 */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Updates state of all types of interrupts on all sources on resume after
 * reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	/* SR-IOV VF / passthrough: MSI-X may need re-enabling after FLR */
	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
		amdgpu_restore_msix(adev);

	/* Re-apply the software enable-refcounts to the hardware */
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs || !src->funcs->set)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Enables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned int type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	/* Only touch hardware on the 0 -> 1 refcount transition */
	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Disables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned int type)
{
	/* When the RAS bad-page threshold has been reached (RMA state), the
	 * interrupt source may never have been enabled; return -EINVAL
	 * instead of underflowing the refcount below.
	 */
	if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type))
		return -EINVAL;

	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	/* Catch unbalanced puts: refcount is already zero */
	if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
		return -EINVAL;

	/* Only touch hardware on the 1 -> 0 refcount transition */
	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_enabled - check whether interrupt is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
 * invalid parameters
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned int type)
{
	if (!adev->irq.installed)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	/* Enabled means the per-type refcount is non-zero */
	return !!atomic_read(&src->enabled_types[type]);
}

/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX: intentionally empty — masking is not implemented */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX: intentionally empty — unmasking is not implemented */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};

/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware irq number
 *
 * Current implementation assigns simple interrupt handler to the given virtual
 * IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	/* hwirq is the GPU IH src id; reject out-of-range ids */
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};

/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	/* One linear domain slot per possible IH source id */
	adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						    &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		dev_err(adev->dev, "GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
}

/**
 * amdgpu_irq_create_mapping - create mapping between domain Linux IRQs
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 *
 * Returns:
 * Linux IRQ
 */
unsigned int amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned int src_id)
{
	/* Cache the virq so amdgpu_irq_dispatch() can route IVs to it */
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
}