#define pr_fmt(fmt)  "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

static void irq_domain_check_hierarchy(struct irq_domain *domain);

struct irqchip_fwid {
	struct fwnode_handle	fwnode;
	unsigned int		type;
	char			*name;
	void			*data;
};

/**
 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
 *                           identifying an irq domain
 * @type:	Type of irqchip_fwnode. See linux/irqdomain.h
 * @name:	Optional user provided domain name
 * @id:		Optional user provided id if name != NULL
 * @data:	Optional user-provided data
 *
 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
 * fwnode_handle (or NULL on failure).
 *
 * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
 * solely to transport name information to irqdomain creation code. The
 * node is not stored. For other types the pointer is kept in the irq
 * domain struct.
 */
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
						const char *name, void *data)
{
	struct irqchip_fwid *fwid;
	char *n;

	fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);

	switch (type) {
	case IRQCHIP_FWNODE_NAMED:
		n = kasprintf(GFP_KERNEL, "%s", name);
		break;
	case IRQCHIP_FWNODE_NAMED_ID:
		n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
		break;
	default:
		n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
		break;
	}

	if (!fwid || !n) {
		kfree(fwid);
		kfree(n);
		return NULL;
	}

	fwid->type = type;
	fwid->name = n;
	fwid->data = data;
	fwid->fwnode.type = FWNODE_IRQCHIP;
	return &fwid->fwnode;
}
EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);

/**
 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
 * @fwnode:	fwnode_handle to free
 *
 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 */
void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
{
	struct irqchip_fwid *fwid;

	if (WARN_ON(!is_fwnode_irqchip(fwnode)))
		return;

	fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
	kfree(fwid->name);
	kfree(fwid);
}
EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
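/*
 * Example (illustrative sketch, not part of this file): a driver for an
 * interrupt controller with no firmware node of its own can allocate a
 * synthetic fwnode to identify its domain, via the wrappers in
 * <linux/irqdomain.h>. The "acme" names below are made up:
 *
 *	struct fwnode_handle *fwnode;
 *
 *	fwnode = irq_domain_alloc_fwnode(acme->base);	// "irqchip@..." name
 *	if (!fwnode)
 *		return -ENOMEM;
 *	...
 *	irq_domain_free_fwnode(fwnode);			// on teardown
 */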
/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode:	firmware node for the interrupt controller
 * @size:	Size of linear map; 0 for radix mapping only
 * @hwirq_max:	Maximum number of interrupts supported by controller
 * @direct_max:	Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *		direct mapping
 * @ops:	domain callbacks
 * @host_data:	Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct device_node *of_node = to_of_node(fwnode);
	struct irqchip_fwid *fwid;
	struct irq_domain *domain;

	static atomic_t unknown_domains;

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	if (fwnode && is_fwnode_irqchip(fwnode)) {
		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);

		switch (fwid->type) {
		case IRQCHIP_FWNODE_NAMED:
		case IRQCHIP_FWNODE_NAMED_ID:
			domain->name = kstrdup(fwid->name, GFP_KERNEL);
			if (!domain->name) {
				kfree(domain);
				return NULL;
			}
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
			break;
		default:
			domain->fwnode = fwnode;
			domain->name = fwid->name;
			break;
		}
	} else if (of_node) {
		char *name;

		/*
		 * DT paths contain '/', which debugfs is legitimately
		 * unhappy about. Replace them with ':', which does
		 * the trick and is not as offensive as '\'...
		 */
		name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
		if (!name) {
			kfree(domain);
			return NULL;
		}

		strreplace(name, '/', ':');

		domain->name = name;
		domain->fwnode = fwnode;
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	if (!domain->name) {
		if (fwnode) {
			pr_err("Invalid fwnode type (%d) for irqdomain\n",
			       fwnode->type);
		}
		domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
					 atomic_inc_return(&unknown_domains));
		if (!domain->name) {
			kfree(domain);
			return NULL;
		}
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	of_node_get(of_node);

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;
	irq_domain_check_hierarchy(domain);

	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * removal, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	WARN_ON(!radix_tree_empty(&domain->revmap_tree));

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	of_node_put(irq_domain_get_of_node(domain));
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
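/*
 * Example (illustrative, not part of this file): drivers normally reach
 * __irq_domain_add() through a wrapper such as irq_domain_add_linear()
 * from <linux/irqdomain.h>. A hypothetical 32-line "acme" irqchip might
 * register its domain like so (all acme_* names are assumptions):
 *
 *	static const struct irq_domain_ops acme_domain_ops = {
 *		.map	= acme_irq_map,		// set chip/handler per hwirq
 *		.xlate	= irq_domain_xlate_twocell,
 *	};
 *
 *	acme->domain = irq_domain_add_linear(node, 32,
 *					     &acme_domain_ops, acme);
 *	if (!acme->domain)
 *		return -ENOMEM;
 */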
/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *	be '0', but a positive integer can be used if the effective
 *	hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
				  first_hwirq + size, 0, ops, host_data);
	if (domain)
		irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
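/*
 * Example (illustrative, not part of this file): a hypothetical non-DT
 * board that must keep pre-assigned Linux IRQs 64..79 for hwirqs 0..15
 * could use the simple variant; a DT platform would pass first_irq == 0
 * and let mappings be created on demand:
 *
 *	domain = irq_domain_add_simple(node, 16, 64, &acme_domain_ops, acme);
 */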
/**
 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 * @fwspec: FW specifier for an interrupt
 * @bus_token: domain-specific bus token; DOMAIN_BUS_ANY matches any domain
 */
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_domain *h, *found = NULL;
	struct fwnode_handle *fwnode = fwspec->fwnode;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 *
	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
	 * values must generate an exact match for the domain to be
	 * selected.
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->select && fwspec->param_count)
			rc = h->ops->select(h, fwspec, bus_token);
		else if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
			      ((bus_token == DOMAIN_BUS_ANY) ||
			       (h->bus_token == bus_token)));

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);

/**
 * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
 *				IRQ remapping
 *
 * Return: false if any MSI irq domain does not support IRQ remapping,
 * true otherwise (including if there is no MSI irq domain)
 */
bool irq_domain_check_msi_remap(void)
{
	struct irq_domain *h;
	bool ret = true;

	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (irq_domain_is_msi(h) &&
		    !irq_domain_hierarchical_is_msi_remap(h)) {
			ret = false;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;
	domain->mapcount--;

	/* Clear reverse map for this hwirq */
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
	}
}
int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					domain->name, hwirq, virq, ret);
			}
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	domain->mapcount++;
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = virq;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);

void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int i;

	of_node = irq_domain_get_of_node(domain);
	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		 of_node_full_name(of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
	}
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	struct device_node *of_node;
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	of_node = irq_domain_get_of_node(domain);
	virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
		       domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, irq_set_irq_type() should be
 * called on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		 hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
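/*
 * Example (illustrative, not part of this file): a typical consumer turns
 * a hwirq it discovered from firmware or hardware registers into a Linux
 * IRQ and then requests it. The acme_* names are hypothetical:
 *
 *	unsigned int virq = irq_create_mapping(acme->domain, hwirq);
 *
 *	if (!virq)
 *		return -ENOMEM;
 *	irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);
 *	ret = request_irq(virq, acme_isr, 0, "acme", acme);
 */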
/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert into the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int ret;

	of_node = irq_domain_get_of_node(domain);
	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(of_node));
	if (unlikely(ret < 0))
		return ret;

	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	*hwirq = fwspec->param[0];
	return 0;
}

static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
				      struct irq_fwspec *fwspec)
{
	int i;

	fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
	fwspec->param_count = irq_data->args_count;

	for (i = 0; i < irq_data->args_count; i++)
		fwspec->param[i] = irq_data->args[i];
}

unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	struct irq_data *irq_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	if (fwspec->fwnode) {
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	/*
	 * WARN if the irqchip returns a type with bits
	 * outside the sense mask set and clear these bits.
	 */
	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
		type &= IRQ_TYPE_SENSE_MASK;

	/*
	 * If we've already configured this interrupt,
	 * don't do it again, or hell will break loose.
	 */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		/*
		 * If the trigger type is not specified or matches the
		 * current trigger type then we are done so return the
		 * interrupt number.
		 */
		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
			return virq;

		/*
		 * If the trigger type has not been set yet, then set
		 * it now and return the interrupt number.
		 */
		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
			irq_data = irq_get_irq_data(virq);
			if (!irq_data)
				return 0;

			irqd_set_trigger_type(irq_data, type);
			return virq;
		}

		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_is_hierarchy(domain)) {
		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	irq_data = irq_get_irq_data(virq);
	if (!irq_data) {
		if (irq_domain_is_hierarchy(domain))
			irq_domain_free_irqs(virq, 1);
		else
			irq_dispose_mapping(virq);
		return 0;
	}

	/* Store trigger type */
	irqd_set_trigger_type(irq_data, type);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);

unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
	struct irq_fwspec fwspec;

	of_phandle_args_to_fwspec(irq_data, &fwspec);
	return irq_create_fwspec_mapping(&fwspec);
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	if (irq_domain_is_hierarchy(domain)) {
		irq_domain_free_irqs(virq, 1);
	} else {
		irq_domain_disassociate(domain, virq);
		irq_free_desc(virq);
	}
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
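/*
 * Example (illustrative, not part of this file): irq_create_fwspec_mapping()
 * is usually reached via irq_create_of_mapping() or the ACPI core, but an
 * irq_fwspec can also be built by hand, e.g. for a two-cell specifier:
 *
 *	struct irq_fwspec fwspec = {
 *		.fwnode		= domain->fwnode,
 *		.param_count	= 2,
 *		.param		= { hwirq, IRQ_TYPE_EDGE_RISING },
 *	};
 *
 *	virq = irq_create_fwspec_mapping(&fwspec);
 */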
/**
 * irq_find_mapping() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
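/*
 * Example (illustrative, not part of this file): the classic caller of
 * irq_find_mapping() is a chained flow handler demultiplexing a status
 * register. All acme_* names and the ACME_STATUS offset are made up:
 *
 *	static void acme_irq_handler(struct irq_desc *desc)
 *	{
 *		struct acme_intc *intc = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int hwirq;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(intc->base + ACME_STATUS);
 *		for_each_set_bit(hwirq, &pending, 32)
 *			generic_handle_irq(irq_find_mapping(intc->domain,
 *							    hwirq));
 *		chained_irq_exit(chip, desc);
 *	}
 */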
#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static void virq_debug_show_one(struct seq_file *m, struct irq_desc *desc)
{
	struct irq_domain *domain;
	struct irq_data *data;

	domain = desc->irq_data.domain;
	data = &desc->irq_data;

	while (domain) {
		unsigned int irq = data->irq;
		unsigned long hwirq = data->hwirq;
		struct irq_chip *chip;
		bool direct;

		if (data == &desc->irq_data)
			seq_printf(m, "%5d  ", irq);
		else
			seq_printf(m, "%5d+ ", irq);
		seq_printf(m, "0x%05lx  ", hwirq);

		chip = irq_data_get_irq_chip(data);
		seq_printf(m, "%-15s  ", (chip && chip->name) ? chip->name : "none");

		seq_printf(m, data ? "0x%p  " : "  %p  ",
			   irq_data_get_irq_chip_data(data));

		seq_printf(m, "   %c    ", (desc->action && desc->action->handler) ? '*' : ' ');
		direct = (irq == hwirq) && (irq < domain->revmap_direct_max_irq);
		seq_printf(m, "%6s%-8s  ",
			   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
			   direct ? "(DIRECT)" : "");
		seq_printf(m, "%s\n", domain->name);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		domain = domain->parent;
		data = data->parent_data;
#else
		domain = NULL;
#endif
	}
}

static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	struct irq_domain *domain;
	struct radix_tree_iter iter;
	void **slot;
	int i;

	seq_printf(m, " %-16s  %-6s  %-10s  %-10s  %s\n",
		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(domain, &irq_domain_list, link) {
		struct device_node *of_node;
		const char *name;

		int count = 0;

		of_node = irq_domain_get_of_node(domain);
		if (of_node)
			name = of_node_full_name(of_node);
		else if (is_fwnode_irqchip(domain->fwnode))
			name = container_of(domain->fwnode, struct irqchip_fwid,
					    fwnode)->name;
		else
			name = "";

		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
			count++;
		seq_printf(m, "%c%-16s  %6u  %10u  %10u  %s\n",
			   domain == irq_default_domain ? '*' : ' ', domain->name,
			   domain->revmap_size + count, domain->revmap_size,
			   domain->revmap_direct_max_irq,
			   name);
	}
	mutex_unlock(&irq_domain_mutex);

	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %6s  %-14s  %s\n", "irq", "hwirq",
		   "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		   "active", "type", "domain");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);
		virq_debug_show_one(m, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
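/*
 * Example (illustrative, not part of this file): with a two-cell binding,
 * the device tree consumer encodes hwirq number and trigger type in the
 * specifier, and irq_domain_xlate_twocell() decodes them unchanged:
 *
 *	interrupt-parent = <&acme_intc>;
 *	interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;	// hwirq 17, level high
 */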
/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings. For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	else
		*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
			   int node, const struct cpumask *affinity)
{
	unsigned int hint;

	if (virq >= 0) {
		virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
					 affinity);
	} else {
		hint = hwirq % nr_irqs;
		if (hint == 0)
			hint++;
		virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
					 affinity);
		if (virq <= 0 && hint > 1) {
			virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
						 affinity);
		}
	}

	return virq;
}
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_domain_create_hierarchy - Add an irq domain into the hierarchy
 * @parent:	Parent irq domain to associate with the new domain
 * @flags:	Irq domain flags associated to the domain
 * @size:	Size of the domain. See below
 * @fwnode:	Optional fwnode of the interrupt controller
 * @ops:	Pointer to the interrupt domain callbacks
 * @host_data:	Controller private data pointer
 *
 * If @size is 0 a tree domain is created, otherwise a linear domain.
 *
 * If successful the parent is associated to the new domain and the
 * domain flags are set.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
					       unsigned int flags,
					       unsigned int size,
					       struct fwnode_handle *fwnode,
					       const struct irq_domain_ops *ops,
					       void *host_data)
{
	struct irq_domain *domain;

	if (size)
		domain = irq_domain_create_linear(fwnode, size, ops, host_data);
	else
		domain = irq_domain_create_tree(fwnode, ops, host_data);
	if (domain) {
		domain->parent = parent;
		domain->flags |= flags;
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);

static void irq_domain_insert_irq(int virq)
{
	struct irq_data *data;

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount++;
		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = virq;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_insert(&domain->revmap_tree, hwirq, data);
			mutex_unlock(&revmap_trees_mutex);
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && data->chip)
			domain->name = data->chip->name;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);
}

static void irq_domain_remove_irq(int virq)
{
	struct irq_data *data;

	irq_set_status_flags(virq, IRQ_NOREQUEST);
	irq_set_chip_and_handler(virq, NULL, NULL);
	synchronize_irq(virq);
	smp_mb();

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount--;
		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = 0;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
		}
	}
}

static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
						   struct irq_data *child)
{
	struct irq_data *irq_data;

	irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
				irq_data_get_node(child));
	if (irq_data) {
		child->parent_data = irq_data;
		irq_data->irq = child->irq;
		irq_data->common = child->common;
		irq_data->domain = domain;
	}

	return irq_data;
}

static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data, *tmp;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		tmp = irq_data->parent_data;
		irq_data->parent_data = NULL;
		irq_data->domain = NULL;

		while (tmp) {
			irq_data = tmp;
			tmp = tmp->parent_data;
			kfree(irq_data);
		}
	}
}

static int irq_domain_alloc_irq_data(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct irq_domain *parent;
	int i;

	/* The outermost irq_data is embedded in struct irq_desc */
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		irq_data->domain = domain;

		for (parent = domain->parent; parent; parent = parent->parent) {
			irq_data = irq_domain_insert_irq_data(parent, irq_data);
			if (!irq_data) {
				irq_domain_free_irq_data(virq, i + 1);
				return -ENOMEM;
			}
		}
	}

	return 0;
}
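/*
 * Example (illustrative, not part of this file): a hypothetical stacked
 * domain implements .alloc by first allocating from its parent and then
 * filling in its own level. The acme_* names and hwirq_base are
 * assumptions standing in for per-device state:
 *
 *	static int acme_domain_alloc(struct irq_domain *domain,
 *				     unsigned int virq,
 *				     unsigned int nr_irqs, void *arg)
 *	{
 *		int i, ret;
 *
 *		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
 *		if (ret)
 *			return ret;
 *
 *		for (i = 0; i < nr_irqs; i++)
 *			irq_domain_set_hwirq_and_chip(domain, virq + i,
 *						      hwirq_base + i,
 *						      &acme_chip, NULL);
 *		return 0;
 *	}
 */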
/**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
 * @domain:	domain to match
 * @virq:	IRQ number to get irq_data
 */
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
					 unsigned int virq)
{
	struct irq_data *irq_data;

	for (irq_data = irq_get_irq_data(virq); irq_data;
	     irq_data = irq_data->parent_data)
		if (irq_data->domain == domain)
			return irq_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);

/**
 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
 * @domain:	Interrupt domain to match
 * @virq:	IRQ number
 * @hwirq:	The hwirq number
 * @chip:	The associated interrupt chip
 * @chip_data:	The associated chip data
 */
int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
				  irq_hw_number_t hwirq, struct irq_chip *chip,
				  void *chip_data)
{
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);

	if (!irq_data)
		return -ENOENT;

	irq_data->hwirq = hwirq;
	irq_data->chip = chip ? chip : &no_irq_chip;
	irq_data->chip_data = chip_data;

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);

/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain:		Interrupt domain to match
 * @virq:		IRQ number
 * @hwirq:		The hardware interrupt number
 * @chip:		The associated interrupt chip
 * @chip_data:		The associated interrupt chip data
 * @handler:		The interrupt flow handler
 * @handler_data:	The interrupt flow handler data
 * @handler_name:	The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
	__irq_set_handler(virq, handler, 0, handler_name);
	irq_set_handler_data(virq, handler_data);
}
EXPORT_SYMBOL(irq_domain_set_info);

/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data:	The pointer to irq_data
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
	irq_data->hwirq = 0;
	irq_data->chip = &no_irq_chip;
	irq_data->chip_data = NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);

/**
 * irq_domain_free_irqs_common - Clear irq_data and free the parent
 * @domain:	Interrupt domain to match
 * @virq:	IRQ number to start with
 * @nr_irqs:	The number of irqs to free
 */
void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data)
			irq_domain_reset_irq_data(irq_data);
	}
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
/**
 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
 * @domain:	Interrupt domain to match
 * @virq:	IRQ number to start with
 * @nr_irqs:	The number of irqs to free
 */
void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_set_handler_data(virq + i, NULL);
		irq_set_handler(virq + i, NULL);
	}
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
{
	return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
}

static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
					   unsigned int irq_base,
					   unsigned int nr_irqs)
{
	domain->ops->free(domain, irq_base, nr_irqs);
	if (irq_domain_is_auto_recursive(domain)) {
		BUG_ON(!domain->parent);
		irq_domain_free_irqs_recursive(domain->parent, irq_base,
					       nr_irqs);
	}
}

int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
				    unsigned int irq_base,
				    unsigned int nr_irqs, void *arg)
{
	int ret = 0;
	struct irq_domain *parent = domain->parent;
	bool recursive = irq_domain_is_auto_recursive(domain);

	BUG_ON(recursive && !parent);
	if (recursive)
		ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
						      nr_irqs, arg);
	if (ret < 0)
		return ret;

	ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
	if (ret < 0 && recursive)
		irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);

	return ret;
}
/**
 * __irq_domain_alloc_irqs - Allocate IRQs from domain
 * @domain:	domain to allocate from
 * @irq_base:	allocate specified IRQ number if irq_base >= 0
 * @nr_irqs:	number of IRQs to allocate
 * @node:	NUMA node id for memory allocation
 * @arg:	domain specific argument
 * @realloc:	IRQ descriptors have already been allocated if true
 * @affinity:	Optional irq affinity mask for multiqueue devices
 *
 * Allocate IRQ numbers and initialize all data structures to support
 * hierarchy IRQ domains.
 * Parameter @realloc is mainly to support legacy IRQs.
 * Returns error code or allocated IRQ number
 *
 * The whole process to setup an IRQ has been split into two steps.
 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
 * descriptors and required hardware resources. The second step,
 * irq_domain_activate_irq(), is to program the hardware with preallocated
 * resources. In this way, it's easier to roll back when failing to
 * allocate resources.
 */
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
			    unsigned int nr_irqs, int node, void *arg,
			    bool realloc, const struct cpumask *affinity)
{
	int i, ret, virq;

	if (domain == NULL) {
		domain = irq_default_domain;
		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
			return -EINVAL;
	}

	if (!domain->ops->alloc) {
		pr_debug("domain->ops->alloc() is NULL\n");
		return -ENOSYS;
	}

	if (realloc && irq_base >= 0) {
		virq = irq_base;
	} else {
		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
					      affinity);
		if (virq < 0) {
			pr_debug("cannot allocate IRQ(base %d, count %d)\n",
				 irq_base, nr_irqs);
			return virq;
		}
	}

	if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
		pr_debug("cannot allocate memory for IRQ%d\n", virq);
		ret = -ENOMEM;
		goto out_free_desc;
	}

	mutex_lock(&irq_domain_mutex);
	ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
	if (ret < 0) {
		mutex_unlock(&irq_domain_mutex);
		goto out_free_irq_data;
	}
	for (i = 0; i < nr_irqs; i++)
		irq_domain_insert_irq(virq + i);
	mutex_unlock(&irq_domain_mutex);

	return virq;

out_free_irq_data:
	irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
	irq_free_descs(virq, nr_irqs);
	return ret;
}

/**
 * irq_domain_free_irqs - Free IRQ number and associated data structures
 * @virq:	base IRQ number
 * @nr_irqs:	number of IRQs to free
 */
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_get_irq_data(virq);
	int i;

	if (WARN(!data || !data->domain || !data->domain->ops->free,
		 "NULL pointer, cannot free irq\n"))
		return;

	mutex_lock(&irq_domain_mutex);
	for (i = 0; i < nr_irqs; i++)
		irq_domain_remove_irq(virq + i);
	irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
	mutex_unlock(&irq_domain_mutex);

	irq_domain_free_irq_data(virq, nr_irqs);
	irq_free_descs(virq, nr_irqs);
}

/**
 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
 * @domain:	Domain whose parent to allocate from
 * @irq_base:	Base IRQ number
 * @nr_irqs:	Number of IRQs to allocate
 * @arg:	Allocation data (arch/domain specific)
 *
 * Check whether the domain has been set up as recursive. If not, allocate
 * through the parent domain.
 */
int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
				 unsigned int irq_base, unsigned int nr_irqs,
				 void *arg)
{
	/* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
	if (irq_domain_is_auto_recursive(domain))
		return 0;

	domain = domain->parent;
	if (domain)
		return irq_domain_alloc_irqs_recursive(domain, irq_base,
						       nr_irqs, arg);
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
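/*
 * Example (illustrative, not part of this file): the two-step setup
 * described above __irq_domain_alloc_irqs(). A caller allocates first;
 * activation is normally driven later by irq_startup()/request_irq():
 *
 *	virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
 *	if (virq < 0)
 *		return virq;
 *	// hardware is only programmed once the IRQ is activated:
 *	irq_domain_activate_irq(irq_get_irq_data(virq));
 */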
/**
 * irq_domain_free_irqs_parent - Free interrupts from parent domain
 * @domain:	Domain whose parent to free from
 * @irq_base:	Base IRQ number
 * @nr_irqs:	Number of IRQs to free
 *
 * Check whether the domain has been set up as recursive. If not, free
 * through the parent domain.
 */
void irq_domain_free_irqs_parent(struct irq_domain *domain,
				 unsigned int irq_base, unsigned int nr_irqs)
{
	/* irq_domain_free_irqs_recursive() will call parent's free */
	if (!irq_domain_is_auto_recursive(domain) && domain->parent)
		irq_domain_free_irqs_recursive(domain->parent, irq_base,
					       nr_irqs);
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);

static void __irq_domain_activate_irq(struct irq_data *irq_data)
{
	if (irq_data && irq_data->domain) {
		struct irq_domain *domain = irq_data->domain;

		if (irq_data->parent_data)
			__irq_domain_activate_irq(irq_data->parent_data);
		if (domain->ops->activate)
			domain->ops->activate(domain, irq_data);
	}
}

static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
{
	if (irq_data && irq_data->domain) {
		struct irq_domain *domain = irq_data->domain;

		if (domain->ops->deactivate)
			domain->ops->deactivate(domain, irq_data);
		if (irq_data->parent_data)
			__irq_domain_deactivate_irq(irq_data->parent_data);
	}
}

/**
 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
 *			     interrupt
 * @irq_data:	outermost irq_data associated with interrupt
 *
 * This is the second step to call domain_ops->activate to program interrupt
 * controllers, so the interrupt could actually get delivered.
 */
void irq_domain_activate_irq(struct irq_data *irq_data)
{
	if (!irqd_is_activated(irq_data)) {
		__irq_domain_activate_irq(irq_data);
		irqd_set_activated(irq_data);
	}
}

/**
 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
 *			       deactivate interrupt
 * @irq_data:	outermost irq_data associated with interrupt
 *
 * It calls domain_ops->deactivate to program interrupt controllers to disable
 * interrupt delivery.
 */
void irq_domain_deactivate_irq(struct irq_data *irq_data)
{
	if (irqd_is_activated(irq_data)) {
		__irq_domain_deactivate_irq(irq_data);
		irqd_clr_activated(irq_data);
	}
}

static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
	/* Hierarchy irq_domains must implement callback alloc() */
	if (domain->ops->alloc)
		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
}

/**
 * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
 *					  parent has MSI remapping support
 * @domain: domain pointer
 */
bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
{
	for (; domain; domain = domain->parent) {
		if (irq_domain_is_msi_remap(domain))
			return true;
	}
	return false;
}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
/**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
 * @domain:	domain to match
 * @virq:	IRQ number to get irq_data
 */
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
					 unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);

/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain:		Interrupt domain to match
 * @virq:		IRQ number
 * @hwirq:		The hardware interrupt number
 * @chip:		The associated interrupt chip
 * @chip_data:		The associated interrupt chip data
 * @handler:		The interrupt flow handler
 * @handler_data:	The interrupt flow handler data
 * @handler_name:	The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
	irq_set_chip_data(virq, chip_data);
	irq_set_handler_data(virq, handler_data);
}

static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */