// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/xarray.h>

#include "internals.h"

/**
 * struct msi_device_data - MSI per device data
 * @properties:	MSI properties which are interesting to drivers
 * @mutex:	Mutex protecting the MSI descriptor store
 * @__domains:	Internal data for per device MSI domains
 * @__iter_idx:	Index to search the next entry for iterators
 */
struct msi_device_data {
	unsigned long		properties;
	struct mutex		mutex;
	struct msi_dev_domain	__domains[MSI_MAX_DEVICE_IRQDOMAINS];
	unsigned long		__iter_idx;
};

/**
 * struct msi_ctrl - MSI internal management control structure
 * @domid:	ID of the domain on which management operations should be done
 * @first:	First (hardware) slot index to operate on
 * @last:	Last (hardware) slot index to operate on
 * @nirqs:	The number of Linux interrupts to allocate. Can be larger
 *		than the range due to PCI/multi-MSI.
 */
struct msi_ctrl {
	unsigned int	domid;
	unsigned int	first;
	unsigned int	last;
	unsigned int	nirqs;
};

/* Invalid Xarray index which is outside of any searchable range */
#define MSI_XA_MAX_INDEX	(ULONG_MAX - 1)
/* The maximum domain size */
#define MSI_XA_DOMAIN_SIZE	(MSI_MAX_INDEX + 1)

static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl);
static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid);
static inline int msi_sysfs_create_group(struct device *dev);

/**
 * msi_alloc_desc - Allocate an initialized msi_desc
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array size of @nvec
 *
 * If @affinity is not %NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 *
 * Return: pointer to allocated &msi_desc on success or %NULL on failure
 */
static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
				       const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);

	if (!desc)
		return NULL;

	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}
	return desc;
}

static void msi_free_desc(struct msi_desc *desc)
{
	kfree(desc->affinity);
	kfree(desc);
}

static int msi_insert_desc(struct device *dev, struct msi_desc *desc,
			   unsigned int domid, unsigned int index)
{
	struct msi_device_data *md = dev->msi.data;
	struct xarray *xa = &md->__domains[domid].store;
	unsigned int hwsize;
	int ret;

	hwsize = msi_domain_get_hwsize(dev, domid);

	if (index == MSI_ANY_INDEX) {
		struct xa_limit limit = { .min = 0, .max = hwsize - 1 };
		unsigned int index;

		/* Let the xarray allocate a free index within the limit */
		ret = xa_alloc(xa, &index, desc, limit, GFP_KERNEL);
		if (ret)
			goto fail;

		desc->msi_index = index;
		return 0;
	} else {
		if (index >= hwsize) {
			ret = -ERANGE;
			goto fail;
		}

		desc->msi_index = index;
		ret = xa_insert(xa, index, desc, GFP_KERNEL);
		if (ret)
			goto fail;
		return 0;
	}
fail:
	msi_free_desc(desc);
	return ret;
}

/**
 * msi_domain_insert_msi_desc - Allocate and initialize a MSI descriptor and
 *				insert it at @init_desc->msi_index
 *
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @domid:	The id of the interrupt domain to which the descriptor is added
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
			       struct msi_desc *init_desc)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
	if (!desc)
		return -ENOMEM;

	/* Copy type specific data to the new descriptor. */
	desc->pci = init_desc->pci;

	return msi_insert_desc(dev, desc, domid, init_desc->msi_index);
}

static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
{
	switch (filter) {
	case MSI_DESC_ALL:
		return true;
	case MSI_DESC_NOTASSOCIATED:
		return !desc->irq;
	case MSI_DESC_ASSOCIATED:
		return !!desc->irq;
	}
	WARN_ON_ONCE(1);
	return false;
}

static bool msi_ctrl_valid(struct device *dev, struct msi_ctrl *ctrl)
{
	unsigned int hwsize;

	if (WARN_ON_ONCE(ctrl->domid >= MSI_MAX_DEVICE_IRQDOMAINS ||
			 (dev->msi.domain &&
			  !dev->msi.data->__domains[ctrl->domid].domain)))
		return false;

	hwsize = msi_domain_get_hwsize(dev, ctrl->domid);
	if (WARN_ON_ONCE(ctrl->first > ctrl->last ||
			 ctrl->first >= hwsize ||
			 ctrl->last >= hwsize))
		return false;
	return true;
}

static void msi_domain_free_descs(struct device *dev, struct msi_ctrl *ctrl)
{
	struct msi_desc *desc;
	struct xarray *xa;
	unsigned long idx;

	lockdep_assert_held(&dev->msi.data->mutex);

	if (!msi_ctrl_valid(dev, ctrl))
		return;

	xa = &dev->msi.data->__domains[ctrl->domid].store;
	xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
		xa_erase(xa, idx);

		/* Leak the descriptor when it is still referenced */
		if (WARN_ON_ONCE(msi_desc_match(desc, MSI_DESC_ASSOCIATED)))
			continue;
		msi_free_desc(desc);
	}
}

/**
 * msi_domain_free_msi_descs_range - Free a range of MSI descriptors of a device in an irqdomain
 * @dev:	Device for which to free the descriptors
 * @domid:	Id of the domain to operate on
 * @first:	Index to start freeing from (inclusive)
 * @last:	Last index to be freed (inclusive)
 */
void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
				     unsigned int first, unsigned int last)
{
	struct msi_ctrl ctrl = {
		.domid	= domid,
		.first	= first,
		.last	= last,
	};

	msi_domain_free_descs(dev, &ctrl);
}

/**
 * msi_domain_add_simple_msi_descs - Allocate and initialize MSI descriptors
 * @dev:	Pointer to the device for which the descriptors are allocated
 * @ctrl:	Allocation control struct
 *
 * Return: 0 on success or an appropriate failure code.
 */
static int msi_domain_add_simple_msi_descs(struct device *dev, struct msi_ctrl *ctrl)
{
	struct msi_desc *desc;
	unsigned int idx;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	if (!msi_ctrl_valid(dev, ctrl))
		return -EINVAL;

	for (idx = ctrl->first; idx <= ctrl->last; idx++) {
		desc = msi_alloc_desc(dev, 1, NULL);
		if (!desc)
			goto fail_mem;
		ret = msi_insert_desc(dev, desc, ctrl->domid, idx);
		if (ret)
			goto fail;
	}
	return 0;

fail_mem:
	ret = -ENOMEM;
fail:
	msi_domain_free_descs(dev, ctrl);
	return ret;
}

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

static void msi_device_data_release(struct device *dev, void *res)
{
	struct msi_device_data *md = res;
	int i;

	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++) {
		msi_remove_device_irq_domain(dev, i);
		WARN_ON_ONCE(!xa_empty(&md->__domains[i].store));
		xa_destroy(&md->__domains[i].store);
	}
	dev->msi.data = NULL;
}

/**
 * msi_setup_device_data - Setup MSI device data
 * @dev:	Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;
	int ret, i;

	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	ret = msi_sysfs_create_group(dev);
	if (ret) {
		devres_free(md);
		return ret;
	}

	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++)
		xa_init_flags(&md->__domains[i].store, XA_FLAGS_ALLOC);

	/*
	 * If @dev::msi::domain is set and is a global MSI domain, copy the
	 * pointer into the domain array so all code can operate on domain
	 * ids. The NULL pointer check is required to keep the legacy
	 * architecture specific PCI/MSI support working.
	 */
	if (dev->msi.domain && !irq_domain_is_msi_parent(dev->msi.domain))
		md->__domains[MSI_DEFAULT_DOMAIN].domain = dev->msi.domain;

	mutex_init(&md->mutex);
	dev->msi.data = md;
	devres_add(dev, md);
	return 0;
}

/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_lock_descs(struct device *dev)
{
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);

/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_unlock_descs(struct device *dev)
{
	/* Invalidate the index which was cached by the iterator */
	dev->msi.data->__iter_idx = MSI_XA_MAX_INDEX;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);

static struct msi_desc *msi_find_desc(struct msi_device_data *md, unsigned int domid,
				      enum msi_desc_filter filter)
{
	struct xarray *xa = &md->__domains[domid].store;
	struct msi_desc *desc;

	xa_for_each_start(xa, md->__iter_idx, desc, md->__iter_idx) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	md->__iter_idx = MSI_XA_MAX_INDEX;
	return NULL;
}

/**
 * msi_domain_first_desc - Get the first MSI descriptor of an irqdomain associated to a device
 * @dev:	Device to operate on
 * @domid:	The id of the interrupt domain which should be walked.
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
				       enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return NULL;

	lockdep_assert_held(&md->mutex);

	md->__iter_idx = 0;
	return msi_find_desc(md, domid, filter);
}
EXPORT_SYMBOL_GPL(msi_domain_first_desc);
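
/*
 * Usage sketch (illustrative only, not part of this file): driver-facing
 * code walks the descriptors of a device under the MSI descriptor mutex,
 * typically via the msi_for_each_desc() helper from <linux/msi.h>, and can
 * translate an MSI index into a Linux interrupt number with
 * msi_domain_get_virq(). The "foo" names below are made up.
 *
 *	static void foo_report_irqs(struct device *dev)
 *	{
 *		struct msi_desc *desc;
 *
 *		msi_lock_descs(dev);
 *		msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *			dev_info(dev, "index %u -> irq %u\n", desc->msi_index, desc->irq);
 *		msi_unlock_descs(dev);
 *	}
 *
 *	static int foo_request_event_irq(struct device *dev, unsigned int index,
 *					 irq_handler_t handler, void *data)
 *	{
 *		unsigned int virq = msi_domain_get_virq(dev, MSI_DEFAULT_DOMAIN, index);
 *
 *		if (!virq)
 *			return -ENOENT;
 *		return request_irq(virq, handler, 0, "foo-event", data);
 *	}
 */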

/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev:	Device to operate on
 * @domid:	The id of the interrupt domain which should be walked.
 * @filter:	Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of __msi_first_desc(). Consecutive invocations are
 * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
			       enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return NULL;

	lockdep_assert_held(&md->mutex);

	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
		return NULL;

	md->__iter_idx++;
	return msi_find_desc(md, domid, filter);
}
EXPORT_SYMBOL_GPL(msi_next_desc);

/**
 * msi_domain_get_virq - Lookup the Linux interrupt number for a MSI index on an interrupt domain
 * @dev:	Device to operate on
 * @domid:	Domain ID of the interrupt domain associated to the device
 * @index:	MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index)
{
	struct msi_desc *desc;
	unsigned int ret = 0;
	bool pcimsi = false;
	struct xarray *xa;

	if (!dev->msi.data)
		return 0;

	if (WARN_ON_ONCE(index > MSI_MAX_INDEX || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return 0;

	/* This check is only valid for the PCI default MSI domain */
	if (dev_is_pci(dev) && domid == MSI_DEFAULT_DOMAIN)
		pcimsi = to_pci_dev(dev)->msi_enabled;

	msi_lock_descs(dev);
	xa = &dev->msi.data->__domains[domid].store;
	desc = xa_load(xa, pcimsi ? 0 : index);
	if (desc && desc->irq) {
		/*
		 * PCI-MSI has only one descriptor for multiple interrupts.
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (pcimsi) {
			if (index < desc->nvec_used)
				ret = desc->irq + index;
		} else {
			ret = desc->irq;
		}
	}

	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(msi_domain_get_virq);

#ifdef CONFIG_SYSFS
static struct attribute *msi_dev_attrs[] = {
	NULL
};

static const struct attribute_group msi_irqs_group = {
	.name	= "msi_irqs",
	.attrs	= msi_dev_attrs,
};

static inline int msi_sysfs_create_group(struct device *dev)
{
	return devm_device_add_group(dev, &msi_irqs_group);
}

static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	/* MSI vs. MSIX is per device not per interrupt */
	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;

	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
}

static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs = desc->sysfs_attrs;
	int i;

	if (!attrs)
		return;

	desc->sysfs_attrs = NULL;
	for (i = 0; i < desc->nvec_used; i++) {
		if (attrs[i].show)
			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		kfree(attrs[i].attr.name);
	}
	kfree(attrs);
}

static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs;
	int ret, i;

	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	desc->sysfs_attrs = attrs;
	for (i = 0; i < desc->nvec_used; i++) {
		sysfs_attr_init(&attrs[i].attr);
		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto fail;
		}

		attrs[i].attr.mode = 0444;
		attrs[i].show = msi_mode_show;

		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		if (ret) {
			attrs[i].show = NULL;
			goto fail;
		}
	}
	return 0;

fail:
	msi_sysfs_remove_desc(dev, desc);
	return ret;
}

#if defined(CONFIG_PCI_MSI_ARCH_FALLBACKS) || defined(CONFIG_PCI_XEN)
/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc) which will get sysfs entries
 */
int msi_device_populate_sysfs(struct device *dev)
{
	struct msi_desc *desc;
	int ret;

	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		if (desc->sysfs_attrs)
			continue;
		ret = msi_sysfs_populate_desc(dev, desc);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc) for which to remove
 *		sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
	struct msi_desc *desc;

	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		msi_sysfs_remove_desc(dev, desc);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS || CONFIG_PCI_XEN */
#else /* CONFIG_SYSFS */
static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
#endif /* !CONFIG_SYSFS */

static struct irq_domain *msi_get_device_domain(struct device *dev, unsigned int domid)
{
	struct irq_domain *domain;

	lockdep_assert_held(&dev->msi.data->mutex);

	if (WARN_ON_ONCE(domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return NULL;

	domain = dev->msi.data->__domains[domid].domain;
	if (!domain)
		return NULL;

	if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain)))
		return NULL;

	return domain;
}

static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid)
{
	struct msi_domain_info *info;
	struct irq_domain *domain;

	domain = msi_get_device_domain(dev, domid);
	if (domain) {
		info = domain->host_data;
		return info->hwsize;
	}
	/* No domain, default to MSI_XA_DOMAIN_SIZE */
	return MSI_XA_DOMAIN_SIZE;
}

static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}

static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				for (i--; i > 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static int msi_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * This will catch allocations through the regular irqdomain path except
	 * for MSI domains which really support this, e.g. MBIGEN.
	 */
	if (!info->ops->msi_translate)
		return -ENOTSUPP;
	return info->ops->msi_translate(domain, fwspec, hwirq, type);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
	.translate	= msi_domain_translate,
};

static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq	= msi_domain_ops_get_hwirq,
	.msi_init	= msi_domain_ops_init,
	.msi_prepare	= msi_domain_ops_prepare,
	.set_desc	= msi_domain_ops_set_desc,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

static struct irq_domain *__msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  unsigned int flags,
						  struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->hwsize > MSI_XA_DOMAIN_SIZE)
		return NULL;

	/*
	 * Hardware size 0 is valid for backwards compatibility and for
	 * domains which are not backed by a hardware table. Grant the
	 * maximum index space.
	 */
	if (!info->hwsize)
		info->hwsize = MSI_XA_DOMAIN_SIZE;

	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, flags | IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain) {
		irq_domain_update_bus_token(domain, info->bus_token);
		if (info->flags & MSI_FLAG_PARENT_PM_DEV)
			domain->pm_dev = parent->pm_dev;
	}

	return domain;
}

/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Return: pointer to the created &struct irq_domain or %NULL on failure
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	return __msi_create_irq_domain(fwnode, info, 0, parent);
}
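
/*
 * Usage sketch (illustrative only, not part of this file): an interrupt
 * controller driver typically fills in a struct msi_domain_info with its
 * irq_chip and flags and stacks the MSI domain on top of its parent
 * domain. The "foo" names below are made up; the parent callbacks are the
 * generic irq_chip_*_parent() helpers.
 *
 *	static struct irq_chip foo_msi_chip = {
 *		.name			= "FOO-MSI",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_write_msi_msg	= foo_write_msi_msg,
 *	};
 *
 *	static struct msi_domain_info foo_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &foo_msi_chip,
 *	};
 *
 *	msi_domain = msi_create_irq_domain(fwnode, &foo_msi_domain_info,
 *					   parent_domain);
 */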

/**
 * msi_parent_init_dev_msi_info - Delegate initialization of device MSI info down
 *				  in the domain hierarchy
 * @dev:		The device for which the domain should be created
 * @domain:		The domain in the hierarchy this op is being called on
 * @msi_parent_domain:	The IRQ_DOMAIN_FLAG_MSI_PARENT domain for the child to
 *			be created
 * @msi_child_info:	The MSI domain info of the IRQ_DOMAIN_FLAG_MSI_DEVICE
 *			domain to be created
 *
 * Return: true on success, false otherwise
 *
 * This is the most complex problem of per device MSI domains and the
 * underlying interrupt domain hierarchy:
 *
 * The device domain to be initialized requests the broadest feature set
 * possible and the underlying domain hierarchy puts restrictions on it.
 *
 * That's trivial for a simple parent->child relationship, but it gets
 * interesting with an intermediate domain: root->parent->child. The
 * intermediate 'parent' can expand the capabilities which the 'root'
 * domain is providing. So that creates a classic chicken and egg problem:
 * Which entity is doing the restrictions/expansions?
 *
 * One solution is to let the root domain handle the initialization. That's
 * why there is the @domain and the @msi_parent_domain pointer.
 */
bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				  struct irq_domain *msi_parent_domain,
				  struct msi_domain_info *msi_child_info)
{
	struct irq_domain *parent = domain->parent;

	if (WARN_ON_ONCE(!parent || !parent->msi_parent_ops ||
			 !parent->msi_parent_ops->init_dev_msi_info))
		return false;

	return parent->msi_parent_ops->init_dev_msi_info(dev, parent, msi_parent_domain,
							 msi_child_info);
}

/**
 * msi_create_device_irq_domain - Create a device MSI interrupt domain
 * @dev:		Pointer to the device
 * @domid:		Domain id
 * @template:		MSI domain info bundle used as template
 * @hwsize:		Maximum number of MSI table entries (0 if unknown or unlimited)
 * @domain_data:	Optional pointer to domain specific data which is set in
 *			msi_domain_info::data
 * @chip_data:		Optional pointer to chip specific data which is set in
 *			msi_domain_info::chip_data
 *
 * Return: True on success, false otherwise
 *
 * There is no firmware node required for this interface because the per
 * device domains are software constructs which are actually closer to the
 * hardware reality than any firmware can describe them.
 *
 * The domain name and the irq chip name for a MSI device domain are
 * composed by: "$(PREFIX)$(CHIPNAME)-$(DEVNAME)"
 *
 * $PREFIX:   Optional prefix provided by the underlying MSI parent domain
 *	      via msi_parent_ops::prefix. If that pointer is NULL the prefix
 *	      is empty.
 * $CHIPNAME: The name of the irq_chip in @template
 * $DEVNAME:  The name of the device
 *
 * This results in understandable chip names and hardware interrupt numbers
 * in e.g. /proc/interrupts
 *
 * PCI-MSI-0000:00:1c.0     0-edge  Parent domain has no prefix
 * IR-PCI-MSI-0000:00:1c.4  0-edge  Same with interrupt remapping prefix 'IR-'
 *
 * IR-PCI-MSIX-0000:3d:00.0 0-edge  Hardware interrupt numbers reflect
 * IR-PCI-MSIX-0000:3d:00.0 1-edge  the real MSI-X index on that device
 * IR-PCI-MSIX-0000:3d:00.0 2-edge
 *
 * On IMS domains the hardware interrupt number is either a table entry
 * index or a purely software managed index but it is guaranteed to be
 * unique.
 *
 * The domain pointer is stored in @dev::msi::data::__irqdomains[]. All
 * subsequent operations on the domain depend on the domain id.
 *
 * The domain is automatically freed when the device is removed via devres
 * in the context of @dev::msi::data freeing, but it can also be
 * independently removed via @msi_remove_device_irq_domain().
 */
bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
				  const struct msi_domain_template *template,
				  unsigned int hwsize, void *domain_data,
				  void *chip_data)
{
	struct irq_domain *domain, *parent = dev->msi.domain;
	struct fwnode_handle *fwnode, *fwnalloced = NULL;
	struct msi_domain_template *bundle;
	const struct msi_parent_ops *pops;

	if (!irq_domain_is_msi_parent(parent))
		return false;

	if (domid >= MSI_MAX_DEVICE_IRQDOMAINS)
		return false;

	bundle = kmemdup(template, sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return false;

	bundle->info.hwsize = hwsize;
	bundle->info.chip = &bundle->chip;
	bundle->info.ops = &bundle->ops;
	bundle->info.data = domain_data;
	bundle->info.chip_data = chip_data;

	pops = parent->msi_parent_ops;
	snprintf(bundle->name, sizeof(bundle->name), "%s%s-%s",
		 pops->prefix ? : "", bundle->chip.name, dev_name(dev));
	bundle->chip.name = bundle->name;

	/*
	 * Using the device firmware node is required for wire to MSI
	 * device domains so that the existing firmware results in a domain
	 * match.
	 * All other device domains like PCI/MSI use the named firmware
	 * node as they are not guaranteed to have a fwnode. They are never
	 * looked up and always handled in the context of the device.
	 */
	if (bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE)
		fwnode = dev->fwnode;
	else
		fwnode = fwnalloced = irq_domain_alloc_named_fwnode(bundle->name);

	if (!fwnode)
		goto free_bundle;

	if (msi_setup_device_data(dev))
		goto free_fwnode;

	msi_lock_descs(dev);

	if (WARN_ON_ONCE(msi_get_device_domain(dev, domid)))
		goto fail;

	if (!pops->init_dev_msi_info(dev, parent, parent, &bundle->info))
		goto fail;

	domain = __msi_create_irq_domain(fwnode, &bundle->info, IRQ_DOMAIN_FLAG_MSI_DEVICE, parent);
	if (!domain)
		goto fail;

	domain->dev = dev;
	dev->msi.data->__domains[domid].domain = domain;
	msi_unlock_descs(dev);
	return true;

fail:
	msi_unlock_descs(dev);
free_fwnode:
	irq_domain_free_fwnode(fwnalloced);
free_bundle:
	kfree(bundle);
	return false;
}

/**
 * msi_remove_device_irq_domain - Free a device MSI interrupt domain
 * @dev:	Pointer to the device
 * @domid:	Domain id
 */
void msi_remove_device_irq_domain(struct device *dev, unsigned int domid)
{
	struct fwnode_handle *fwnode = NULL;
	struct msi_domain_info *info;
	struct irq_domain *domain;

	msi_lock_descs(dev);

	domain = msi_get_device_domain(dev, domid);

	if (!domain || !irq_domain_is_msi_device(domain))
		goto unlock;

	dev->msi.data->__domains[domid].domain = NULL;
	info = domain->host_data;
	if (irq_domain_is_msi_device(domain))
		fwnode = domain->fwnode;
	irq_domain_remove(domain);
	irq_domain_free_fwnode(fwnode);
	kfree(container_of(info, struct msi_domain_template, info));

unlock:
	msi_unlock_descs(dev);
}
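
/*
 * Usage sketch (illustrative only, not part of this file): a driver which
 * manages its own interrupt message storage (e.g. IMS-style) can
 * instantiate a per device MSI domain from a template in probe and tear it
 * down again in remove, or rely on the devres based cleanup. All "foo"
 * names, the table size constant and the domain id variable are made up.
 *
 *	static const struct msi_domain_template foo_ims_template = {
 *		.chip = {
 *			.name			= "FOO-IMS",
 *			.irq_mask		= foo_ims_mask,
 *			.irq_unmask		= foo_ims_unmask,
 *			.irq_write_msi_msg	= foo_ims_write_msg,
 *		},
 *		.info = {
 *			.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		},
 *	};
 *
 *	In probe, with domid being a free slot below MSI_MAX_DEVICE_IRQDOMAINS:
 *
 *	if (!msi_create_device_irq_domain(dev, domid, &foo_ims_template,
 *					  FOO_IMS_TABLE_SIZE, NULL, NULL))
 *		return -ENODEV;
 *
 *	In remove (optional, devres frees the domain with the device otherwise):
 *
 *	msi_remove_device_irq_domain(dev, domid);
 */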

/**
 * msi_match_device_irq_domain - Match a device irq domain against a bus token
 * @dev:	Pointer to the device
 * @domid:	Domain id
 * @bus_token:	Bus token to match against the domain bus token
 *
 * Return: True if device domain exists and bus tokens match.
 */
bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
				 enum irq_domain_bus_token bus_token)
{
	struct msi_domain_info *info;
	struct irq_domain *domain;
	bool ret = false;

	msi_lock_descs(dev);
	domain = msi_get_device_domain(dev, domid);
	if (domain && irq_domain_is_msi_device(domain)) {
		info = domain->host_data;
		ret = info->bus_token == bus_token;
	}
	msi_unlock_descs(dev);
	return ret;
}

static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
				   int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	return ops->msi_prepare(domain, dev, nvec, arg);
}

/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the can_mask attribute is set.
	 */
	desc = msi_first_desc(dev, MSI_DESC_ALL);
	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}

static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
			       int allocated)
{
	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
	case DOMAIN_BUS_VMD_MSI:
		if (IS_ENABLED(CONFIG_PCI_MSI))
			break;
		fallthrough;
	default:
		return -ENOSPC;
	}

	/* Let a failed PCI multi MSI allocation retry */
	if (desc->nvec_used > 1)
		return 1;

	/* If there was a successful allocation let the caller know */
	return allocated ? allocated : -ENOSPC;
}

#define VIRQ_CAN_RESERVE	0x01
#define VIRQ_ACTIVATE		0x02

static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (!(vflags & VIRQ_CAN_RESERVE)) {
		irqd_clr_can_reserve(irqd);

		/*
		 * If the interrupt is managed but no CPU is available to
		 * service it, shut it down until better times. Note that
		 * we only do this on the !RESERVE path as x86 (the only
		 * architecture using this flag) deals with this in a
		 * different way by using a catch-all vector.
		 */
		if ((vflags & VIRQ_ACTIVATE) &&
		    irqd_affinity_is_managed(irqd) &&
		    !cpumask_intersects(irq_data_get_affinity_mask(irqd),
					cpu_online_mask)) {
			irqd_set_managed_shutdown(irqd);
			return 0;
		}
	}

	if (!(vflags & VIRQ_ACTIVATE))
		return 0;

	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
	if (ret)
		return ret;
	/*
	 * If the interrupt uses reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (vflags & VIRQ_CAN_RESERVE)
		irqd_clr_activated(irqd);
	return 0;
}

static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain,
				   struct msi_ctrl *ctrl)
{
	struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store;
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	unsigned int vflags = 0, allocated = 0;
	msi_alloc_info_t arg = { };
	struct msi_desc *desc;
	unsigned long idx;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, ctrl->nirqs, &arg);
	if (ret)
		return ret;

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
		vflags |= VIRQ_ACTIVATE;

	/*
	 * Interrupt can use a reserved vector and will not occupy
	 * a real device vector until the interrupt is requested.
	 */
	if (msi_check_reservation_mode(domain, info, dev))
		vflags |= VIRQ_CAN_RESERVE;

	xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
		if (!msi_desc_match(desc, MSI_DESC_NOTASSOCIATED))
			continue;

		/* This should return -ECONFUSED... */
		if (WARN_ON_ONCE(allocated >= ctrl->nirqs))
			return -EINVAL;

		if (ops->prepare_desc)
			ops->prepare_desc(domain, &arg, desc);

		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0)
			return msi_handle_pci_fail(domain, desc, allocated);

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
			ret = msi_init_virq(domain, virq + i, vflags);
			if (ret)
				return ret;
		}
		if (info->flags & MSI_FLAG_DEV_SYSFS) {
			ret = msi_sysfs_populate_desc(dev, desc);
			if (ret)
				return ret;
		}
		allocated++;
	}
	return 0;
}

static int msi_domain_alloc_simple_msi_descs(struct device *dev,
					     struct msi_domain_info *info,
					     struct msi_ctrl *ctrl)
{
	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
		return 0;

	return msi_domain_add_simple_msi_descs(dev, ctrl);
}

static int __msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
{
	struct msi_domain_info *info;
	struct msi_domain_ops *ops;
	struct irq_domain *domain;
	int ret;

	if (!msi_ctrl_valid(dev, ctrl))
		return -EINVAL;

	domain = msi_get_device_domain(dev, ctrl->domid);
	if (!domain)
		return -ENODEV;

	info = domain->host_data;

	ret = msi_domain_alloc_simple_msi_descs(dev, info, ctrl);
	if (ret)
		return ret;

	ops = info->ops;
	if (ops->domain_alloc_irqs)
		return ops->domain_alloc_irqs(domain, dev, ctrl->nirqs);

	return __msi_domain_alloc_irqs(dev, domain, ctrl);
}

static int msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
{
	int ret = __msi_domain_alloc_locked(dev, ctrl);

	if (ret)
		msi_domain_free_locked(dev, ctrl);
	return ret;
}

/**
 * msi_domain_alloc_irqs_range_locked - Allocate interrupts from a MSI interrupt domain
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @domid:	Id of the interrupt domain to operate on
 * @first:	First index to allocate (inclusive)
 * @last:	Last index to allocate (inclusive)
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own descriptor
 * allocation/free.
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last)
{
	struct msi_ctrl ctrl = {
		.domid	= domid,
		.first	= first,
		.last	= last,
		.nirqs	= last + 1 - first,
	};

	return msi_domain_alloc_locked(dev, &ctrl);
}
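
/*
 * Usage sketch (illustrative only, not part of this file): callers which
 * manage their own descriptors hold the MSI descriptor mutex around the
 * _locked allocation, as the kernel-doc above requires. The index range is
 * made up for the example.
 *
 *	msi_lock_descs(dev);
 *	ret = msi_domain_alloc_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, 0, 3);
 *	if (ret)
 *		dev_err(dev, "MSI allocation failed: %d\n", ret);
 *	msi_unlock_descs(dev);
 */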

/**
 * msi_domain_alloc_irqs_range - Allocate interrupts from a MSI interrupt domain
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @domid:	Id of the interrupt domain to operate on
 * @first:	First index to allocate (inclusive)
 * @last:	Last index to allocate (inclusive)
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last)
{
	int ret;

	msi_lock_descs(dev);
	ret = msi_domain_alloc_irqs_range_locked(dev, domid, first, last);
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(msi_domain_alloc_irqs_range);

/**
 * msi_domain_alloc_irqs_all_locked - Allocate all interrupts from a MSI interrupt domain
 *
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @domid:	Id of the interrupt domain to operate on
 * @nirqs:	The number of interrupts to allocate
 *
 * This function scans all MSI descriptors of the MSI domain and allocates interrupts
 * for all unassigned ones. That function is to be used for MSI domain usage where
 * the descriptor allocation is handled at the call site, e.g. PCI/MSI[X].
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs)
{
	struct msi_ctrl ctrl = {
		.domid	= domid,
		.first	= 0,
		.last	= msi_domain_get_hwsize(dev, domid) - 1,
		.nirqs	= nirqs,
	};

	return msi_domain_alloc_locked(dev, &ctrl);
}

static struct msi_map __msi_domain_alloc_irq_at(struct device *dev, unsigned int domid,
						unsigned int index,
						const struct irq_affinity_desc *affdesc,
						union msi_instance_cookie *icookie)
{
	struct msi_ctrl ctrl = { .domid = domid, .nirqs = 1, };
	struct irq_domain *domain;
	struct msi_map map = { };
	struct msi_desc *desc;
	int ret;

	domain = msi_get_device_domain(dev, domid);
	if (!domain) {
		map.index = -ENODEV;
		return map;
	}

	desc = msi_alloc_desc(dev, 1, affdesc);
	if (!desc) {
		map.index = -ENOMEM;
		return map;
	}

	if (icookie)
		desc->data.icookie = *icookie;

	ret = msi_insert_desc(dev, desc, domid, index);
	if (ret) {
		map.index = ret;
		return map;
	}

	ctrl.first = ctrl.last = desc->msi_index;

	ret = __msi_domain_alloc_irqs(dev, domain, &ctrl);
	if (ret) {
		map.index = ret;
		msi_domain_free_locked(dev, &ctrl);
	} else {
		map.index = desc->msi_index;
		map.virq = desc->irq;
	}
	return map;
}

/**
 * msi_domain_alloc_irq_at - Allocate an interrupt from a MSI interrupt domain at
 *			     a given index - or at the next free index
 *
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @domid:	Id of the interrupt domain to operate on
 * @index:	Index for allocation. If @index == %MSI_ANY_INDEX the allocation
 *		uses the next free index.
 * @affdesc:	Optional pointer to an interrupt affinity descriptor structure
 * @icookie:	Optional pointer to a domain specific per instance cookie. If
 *		non-NULL the content of the cookie is stored in msi_desc::data.
 *		Must be NULL for MSI-X allocations
 *
 * This requires a MSI interrupt domain which lets the core code manage the
 * MSI descriptors.
 *
 * Return: struct msi_map
 *
 *	On success msi_map::index contains the allocated index number and
 *	msi_map::virq the corresponding Linux interrupt number
 *
 *	On failure msi_map::index contains the error code and msi_map::virq
 *	is %0.
 */
struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
				       const struct irq_affinity_desc *affdesc,
				       union msi_instance_cookie *icookie)
{
	struct msi_map map;

	msi_lock_descs(dev);
	map = __msi_domain_alloc_irq_at(dev, domid, index, affdesc, icookie);
	msi_unlock_descs(dev);
	return map;
}
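
/*
 * Usage sketch (illustrative only, not part of this file): a driver using
 * a per device MSI domain can allocate a single interrupt at the next free
 * index and inspect the returned map. The domain id and cookie value are
 * made up for the example.
 *
 *	union msi_instance_cookie icookie = { .value = queue_id };
 *	struct msi_map map;
 *
 *	map = msi_domain_alloc_irq_at(dev, domid, MSI_ANY_INDEX, NULL, &icookie);
 *	if (map.index < 0)
 *		return map.index;
 *
 * On success map.virq holds the Linux interrupt number and map.index the
 * allocated MSI index.
 */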

/**
 * msi_device_domain_alloc_wired - Allocate a "wired" interrupt on @domain
 * @domain:	The domain to allocate on
 * @hwirq:	The hardware interrupt number to allocate for
 * @type:	The interrupt type
 *
 * This weirdness supports wire to MSI controllers like MBIGEN.
 *
 * @hwirq is the hardware interrupt number which is handed in from
 * irq_create_fwspec_mapping(). As the wire to MSI domain is sparse, but
 * sized in firmware, the hardware interrupt number cannot be used as MSI
 * index. For the underlying irq chip the MSI index is irrelevant and
 * all it needs is the hardware interrupt number.
 *
 * To handle this the MSI index is allocated with MSI_ANY_INDEX and the
 * hardware interrupt number is stored along with the type information in
 * msi_desc::cookie so the underlying interrupt chip and domain code can
 * retrieve it.
 *
 * Return: The Linux interrupt number (> 0) or an error code
 */
int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
				  unsigned int type)
{
	unsigned int domid = MSI_DEFAULT_DOMAIN;
	union msi_instance_cookie icookie = { };
	struct device *dev = domain->dev;
	struct msi_map map = { };

	if (WARN_ON_ONCE(!dev || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI))
		return -EINVAL;

	icookie.value = ((u64)type << 32) | hwirq;

	msi_lock_descs(dev);
	if (WARN_ON_ONCE(msi_get_device_domain(dev, domid) != domain))
		map.index = -EINVAL;
	else
		map = __msi_domain_alloc_irq_at(dev, domid, MSI_ANY_INDEX, NULL, &icookie);
	msi_unlock_descs(dev);

	return map.index >= 0 ? map.virq : map.index;
}

static void __msi_domain_free_irqs(struct device *dev, struct irq_domain *domain,
				   struct msi_ctrl *ctrl)
{
	struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store;
	struct msi_domain_info *info = domain->host_data;
	struct irq_data *irqd;
	struct msi_desc *desc;
	unsigned long idx;
	int i;

	xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
		/* Only handle MSI entries which have an interrupt associated */
		if (!msi_desc_match(desc, MSI_DESC_ASSOCIATED))
			continue;

		/* Make sure all interrupts are deactivated */
		for (i = 0; i < desc->nvec_used; i++) {
			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
			if (irqd && irqd_is_activated(irqd))
				irq_domain_deactivate_irq(irqd);
		}

		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		if (info->flags & MSI_FLAG_DEV_SYSFS)
			msi_sysfs_remove_desc(dev, desc);
		desc->irq = 0;
	}
}

static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl)
{
	struct msi_domain_info *info;
	struct msi_domain_ops *ops;
	struct irq_domain *domain;

	if (!msi_ctrl_valid(dev, ctrl))
		return;

	domain = msi_get_device_domain(dev, ctrl->domid);
	if (!domain)
		return;

	info = domain->host_data;
	ops = info->ops;

	if (ops->domain_free_irqs)
		ops->domain_free_irqs(domain, dev);
	else
		__msi_domain_free_irqs(dev, domain, ctrl);

	if (ops->msi_post_free)
		ops->msi_post_free(domain, dev);

	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
		msi_domain_free_descs(dev, ctrl);
}

/**
 * msi_domain_free_irqs_range_locked - Free a range of interrupts from a MSI interrupt domain
 *				       associated to @dev with msi_lock held
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 * @domid:	Id of the interrupt domain to operate on
 * @first:	First index to free (inclusive)
 * @last:	Last index to free (inclusive)
 */
void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last)
{
	struct msi_ctrl ctrl = {
		.domid	= domid,
		.first	= first,
		.last	= last,
	};
	msi_domain_free_locked(dev, &ctrl);
}

/**
 * msi_domain_free_irqs_range - Free a range of interrupts from a MSI interrupt domain
 *				associated to @dev
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 * @domid:	Id of the interrupt domain to operate on
 * @first:	First index to free (inclusive)
 * @last:	Last index to free (inclusive)
 */
void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last)
{
	msi_lock_descs(dev);
	msi_domain_free_irqs_range_locked(dev, domid, first, last);
	msi_unlock_descs(dev);
}
EXPORT_SYMBOL_GPL(msi_domain_free_irqs_all);

/**
 * msi_domain_free_irqs_all_locked - Free all interrupts from a MSI interrupt domain
 *				     associated to a device
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 * @domid:	The id of the domain to operate on
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation.
 */
void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid)
{
	msi_domain_free_irqs_range_locked(dev, domid, 0,
					  msi_domain_get_hwsize(dev, domid) - 1);
}

/**
 * msi_domain_free_irqs_all - Free all interrupts from a MSI interrupt domain
 *			      associated to a device
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 * @domid:	The id of the domain to operate on
 */
void msi_domain_free_irqs_all(struct device *dev, unsigned int domid)
{
	msi_lock_descs(dev);
	msi_domain_free_irqs_all_locked(dev, domid);
	msi_unlock_descs(dev);
}

/**
 * msi_device_domain_free_wired - Free a wired interrupt in @domain
 * @domain:	The domain to free the interrupt on
 * @virq:	The Linux interrupt number to free
 *
 * This is the counterpart of msi_device_domain_alloc_wired() for the
 * weird wired to MSI converting domains.
 */
void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq)
{
	struct msi_desc *desc = irq_get_msi_desc(virq);
	struct device *dev = domain->dev;

	if (WARN_ON_ONCE(!dev || !desc || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI))
		return;

	msi_lock_descs(dev);
	if (!WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain)) {
		msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index,
						  desc->msi_index);
	}
	msi_unlock_descs(dev);
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Return: the pointer to the msi_domain_info stored in @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

/**
 * msi_device_has_isolated_msi - True if the device has isolated MSI
 * @dev:	The device to check
 *
 * Isolated MSI means that HW modeled by an irq_domain on the path from the
 * initiating device to the CPU will validate that the MSI message specifies an
 * interrupt number that the device is authorized to trigger. This must block
 * devices from triggering interrupts they are not authorized to trigger.
 * Currently authorization means the MSI vector is one assigned to the device.
 *
 * This is interesting for securing VFIO use cases where a rogue MSI (e.g. created
 * by abusing a normal PCI MemWr DMA) must not allow the VFIO userspace to
 * impact outside its security domain, e.g. userspace triggering interrupts on
 * kernel drivers, a VM triggering interrupts on the hypervisor, or a VM
 * triggering interrupts on another VM.
 */
bool msi_device_has_isolated_msi(struct device *dev)
{
	struct irq_domain *domain = dev_get_msi_domain(dev);

	for (; domain; domain = domain->parent)
		if (domain->flags & IRQ_DOMAIN_FLAG_ISOLATED_MSI)
			return true;
	return arch_is_isolated_msi();
}
EXPORT_SYMBOL_GPL(msi_device_has_isolated_msi);
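
/*
 * Usage sketch (illustrative only, not part of this file): code along the
 * lines of VFIO can use the check above to refuse handing interrupts to
 * userspace unless the platform isolates MSIs or an explicit opt-out was
 * requested. The "allow_unsafe_interrupts" flag is made up for the example.
 *
 *	if (!msi_device_has_isolated_msi(dev) && !allow_unsafe_interrupts)
 *		return -EPERM;
 */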