Lines Matching +full:pci +full:- +full:domain
1 // SPDX-License-Identifier: GPL-2.0
9 * PCI compatible and non-PCI compatible devices.
16 #include <linux/pci.h>
25 * struct msi_device_data - MSI per device data
39 * struct msi_ctrl - MSI internal management control structure
40 * @domid: ID of the domain on which management operations should be done
44 * than the range due to PCI/multi-MSI.
54 #define MSI_XA_MAX_INDEX (ULONG_MAX - 1)
55 /* The maximum domain size */
64 * msi_alloc_desc - Allocate an initialized msi_desc
82 desc->dev = dev; in msi_alloc_desc()
83 desc->nvec_used = nvec; in msi_alloc_desc()
85 desc->affinity = kmemdup_array(affinity, nvec, sizeof(*desc->affinity), GFP_KERNEL); in msi_alloc_desc()
86 if (!desc->affinity) { in msi_alloc_desc()
96 kfree(desc->affinity); in msi_free_desc()
103 struct msi_device_data *md = dev->msi.data; in msi_insert_desc()
104 struct xarray *xa = &md->__domains[domid].store; in msi_insert_desc()
111 struct xa_limit limit = { .min = 0, .max = hwsize - 1 }; in msi_insert_desc()
119 desc->msi_index = index; in msi_insert_desc()
123 ret = -ERANGE; in msi_insert_desc()
127 desc->msi_index = index; in msi_insert_desc()
139 * msi_domain_insert_msi_desc - Allocate and initialize a MSI descriptor and
140 * insert it at @init_desc->msi_index
143 * @domid: The id of the interrupt domain to which the descriptor is added
153 lockdep_assert_held(&dev->msi.data->mutex); in msi_domain_insert_msi_desc()
155 desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity); in msi_domain_insert_msi_desc()
157 return -ENOMEM; in msi_domain_insert_msi_desc()
160 desc->pci = init_desc->pci; in msi_domain_insert_msi_desc()
162 return msi_insert_desc(dev, desc, domid, init_desc->msi_index); in msi_domain_insert_msi_desc()
171 return !desc->irq; in msi_desc_match()
173 return !!desc->irq; in msi_desc_match()
183 if (WARN_ON_ONCE(ctrl->domid >= MSI_MAX_DEVICE_IRQDOMAINS || in msi_ctrl_valid()
184 (dev->msi.domain && in msi_ctrl_valid()
185 !dev->msi.data->__domains[ctrl->domid].domain))) in msi_ctrl_valid()
188 hwsize = msi_domain_get_hwsize(dev, ctrl->domid); in msi_ctrl_valid()
189 if (WARN_ON_ONCE(ctrl->first > ctrl->last || in msi_ctrl_valid()
190 ctrl->first >= hwsize || in msi_ctrl_valid()
191 ctrl->last >= hwsize)) in msi_ctrl_valid()
202 lockdep_assert_held(&dev->msi.data->mutex); in msi_domain_free_descs()
207 xa = &dev->msi.data->__domains[ctrl->domid].store; in msi_domain_free_descs()
208 xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) { in msi_domain_free_descs()
219 * msi_domain_free_msi_descs_range - Free a range of MSI descriptors of a device in an irqdomain
221 * @domid: Id of the domain to operate on
238 * msi_domain_add_simple_msi_descs - Allocate and initialize MSI descriptors
250 lockdep_assert_held(&dev->msi.data->mutex); in msi_domain_add_simple_msi_descs()
253 return -EINVAL; in msi_domain_add_simple_msi_descs()
255 for (idx = ctrl->first; idx <= ctrl->last; idx++) { in msi_domain_add_simple_msi_descs()
259 ret = msi_insert_desc(dev, desc, ctrl->domid, idx); in msi_domain_add_simple_msi_descs()
266 ret = -ENOMEM; in msi_domain_add_simple_msi_descs()
274 *msg = entry->msg; in __get_cached_msi_msg()
292 WARN_ON_ONCE(!xa_empty(&md->__domains[i].store)); in msi_device_data_release()
293 xa_destroy(&md->__domains[i].store); in msi_device_data_release()
295 dev->msi.data = NULL; in msi_device_data_release()
299 * msi_setup_device_data - Setup MSI device data
313 if (dev->msi.data) in msi_setup_device_data()
318 return -ENOMEM; in msi_setup_device_data()
327 xa_init_flags(&md->__domains[i].store, XA_FLAGS_ALLOC); in msi_setup_device_data()
330 * If @dev::msi::domain is set and is a global MSI domain, copy the in msi_setup_device_data()
331 * pointer into the domain array so all code can operate on domain in msi_setup_device_data()
333 * architecture specific PCI/MSI support working. in msi_setup_device_data()
335 if (dev->msi.domain && !irq_domain_is_msi_parent(dev->msi.domain)) in msi_setup_device_data()
336 md->__domains[MSI_DEFAULT_DOMAIN].domain = dev->msi.domain; in msi_setup_device_data()
338 mutex_init(&md->mutex); in msi_setup_device_data()
339 dev->msi.data = md; in msi_setup_device_data()
345 * msi_lock_descs - Lock the MSI descriptor storage of a device
350 mutex_lock(&dev->msi.data->mutex); in msi_lock_descs()
355 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
361 dev->msi.data->__iter_idx = MSI_XA_MAX_INDEX; in msi_unlock_descs()
362 mutex_unlock(&dev->msi.data->mutex); in msi_unlock_descs()
369 struct xarray *xa = &md->__domains[domid].store; in msi_find_desc()
372 xa_for_each_start(xa, md->__iter_idx, desc, md->__iter_idx) { in msi_find_desc()
376 md->__iter_idx = MSI_XA_MAX_INDEX; in msi_find_desc()
381 * msi_domain_first_desc - Get the first MSI descriptor of an irqdomain associated to a device
383 * @domid: The id of the interrupt domain which should be walked.
395 struct msi_device_data *md = dev->msi.data; in msi_domain_first_desc()
400 lockdep_assert_held(&md->mutex); in msi_domain_first_desc()
402 md->__iter_idx = 0; in msi_domain_first_desc()
408 * msi_next_desc - Get the next MSI descriptor of a device
410 * @domid: The id of the interrupt domain which should be walked.
424 struct msi_device_data *md = dev->msi.data; in msi_next_desc()
429 lockdep_assert_held(&md->mutex); in msi_next_desc()
431 if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX) in msi_next_desc()
434 md->__iter_idx++; in msi_next_desc()
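The lock/unlock pair and the first/next iterators above are the building blocks of descriptor walks; <linux/msi.h> wraps them in the msi_for_each_desc() macro. A minimal, hedged sketch of a driver-side walk over the default domain (the MSI_DESC_ASSOCIATED filter and the debug print are illustrative):

/* Hedged sketch: walk all descriptors that already have a Linux irq. */
struct msi_desc *desc;

msi_lock_descs(dev);
msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
	dev_dbg(dev, "MSI index %u -> Linux irq %u\n", desc->msi_index, desc->irq);
msi_unlock_descs(dev);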
440 * msi_domain_get_virq - Lookup the Linux interrupt number for a MSI index on an interrupt domain
442 * @domid: Domain ID of the interrupt domain associated to the device
443 * @index: MSI interrupt index to look for (0-based)
454 if (!dev->msi.data) in msi_domain_get_virq()
460 /* This check is only valid for the PCI default MSI domain */ in msi_domain_get_virq()
462 pcimsi = to_pci_dev(dev)->msi_enabled; in msi_domain_get_virq()
465 xa = &dev->msi.data->__domains[domid].store; in msi_domain_get_virq()
467 if (desc && desc->irq) { in msi_domain_get_virq()
469 * PCI-MSI has only one descriptor for multiple interrupts. in msi_domain_get_virq()
470 * PCI-MSIX and platform MSI use a descriptor per in msi_domain_get_virq()
474 if (index < desc->nvec_used) in msi_domain_get_virq()
475 ret = desc->irq + index; in msi_domain_get_virq()
477 ret = desc->irq; in msi_domain_get_virq()
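As the comment above explains, multi-MSI shares one descriptor across several vectors while MSI-X and platform MSI keep one per vector; msi_domain_get_virq() hides that difference and returns 0 when nothing is allocated at the index. A hedged usage sketch (my_handler and priv are hypothetical, the index is arbitrary):

/* Hedged sketch: resolve MSI index 2 of the default domain and request it. */
unsigned int virq = msi_domain_get_virq(dev, MSI_DEFAULT_DOMAIN, 2);

if (!virq)
	return -ENXIO;		/* Nothing allocated at that index */
return request_irq(virq, my_handler, 0, "my-device", priv);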
505 bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false; in msi_mode_show()
512 struct device_attribute *attrs = desc->sysfs_attrs; in msi_sysfs_remove_desc()
518 desc->sysfs_attrs = NULL; in msi_sysfs_remove_desc()
519 for (i = 0; i < desc->nvec_used; i++) { in msi_sysfs_remove_desc()
521 sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name); in msi_sysfs_remove_desc()
532 attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL); in msi_sysfs_populate_desc()
534 return -ENOMEM; in msi_sysfs_populate_desc()
536 desc->sysfs_attrs = attrs; in msi_sysfs_populate_desc()
537 for (i = 0; i < desc->nvec_used; i++) { in msi_sysfs_populate_desc()
539 attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i); in msi_sysfs_populate_desc()
541 ret = -ENOMEM; in msi_sysfs_populate_desc()
548 ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name); in msi_sysfs_populate_desc()
563 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
564 * @dev: The device (PCI, platform etc) which will get sysfs entries
572 if (desc->sysfs_attrs) in msi_device_populate_sysfs()
582 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
583 * @dev: The device (PCI, platform etc) for which to remove
602 struct irq_domain *domain; in msi_get_device_domain() local
604 lockdep_assert_held(&dev->msi.data->mutex); in msi_get_device_domain()
609 domain = dev->msi.data->__domains[domid].domain; in msi_get_device_domain()
610 if (!domain) in msi_get_device_domain()
613 if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain))) in msi_get_device_domain()
616 return domain; in msi_get_device_domain()
622 struct irq_domain *domain; in msi_domain_get_hwsize() local
624 domain = msi_get_device_domain(dev, domid); in msi_domain_get_hwsize()
625 if (domain) { in msi_domain_get_hwsize()
626 info = domain->host_data; in msi_domain_get_hwsize()
627 return info->hwsize; in msi_domain_get_hwsize()
629 /* No domain, default to MSI_XA_DOMAIN_SIZE */ in msi_domain_get_hwsize()
636 data->chip->irq_write_msi_msg(data, msg); in irq_chip_write_msi_msg()
639 static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg) in msi_check_level() argument
641 struct msi_domain_info *info = domain->host_data; in msi_check_level()
645 * not advertised that it is level-capable, signal the breakage. in msi_check_level()
647 WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) && in msi_check_level()
648 (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) && in msi_check_level()
653 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
666 struct irq_data *parent = irq_data->parent_data; in msi_domain_set_affinity()
670 ret = parent->chip->irq_set_affinity(parent, mask, force); in msi_domain_set_affinity()
673 msi_check_level(irq_data->domain, msg); in msi_domain_set_affinity()
680 static int msi_domain_activate(struct irq_domain *domain, in msi_domain_activate() argument
686 msi_check_level(irq_data->domain, msg); in msi_domain_activate()
691 static void msi_domain_deactivate(struct irq_domain *domain, in msi_domain_deactivate() argument
700 static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, in msi_domain_alloc() argument
703 struct msi_domain_info *info = domain->host_data; in msi_domain_alloc()
704 struct msi_domain_ops *ops = info->ops; in msi_domain_alloc()
705 irq_hw_number_t hwirq = ops->get_hwirq(info, arg); in msi_domain_alloc()
708 if (irq_find_mapping(domain, hwirq) > 0) in msi_domain_alloc()
709 return -EEXIST; in msi_domain_alloc()
711 if (domain->parent) { in msi_domain_alloc()
712 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); in msi_domain_alloc()
718 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); in msi_domain_alloc()
720 if (ops->msi_free) { in msi_domain_alloc()
721 for (i--; i >= 0; i--) in msi_domain_alloc()
722 ops->msi_free(domain, info, virq + i); in msi_domain_alloc()
724 irq_domain_free_irqs_top(domain, virq, nr_irqs); in msi_domain_alloc()
732 static void msi_domain_free(struct irq_domain *domain, unsigned int virq, in msi_domain_free() argument
735 struct msi_domain_info *info = domain->host_data; in msi_domain_free()
738 if (info->ops->msi_free) { in msi_domain_free()
740 info->ops->msi_free(domain, info, virq + i); in msi_domain_free()
742 irq_domain_free_irqs_top(domain, virq, nr_irqs); in msi_domain_free()
745 static int msi_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec, in msi_domain_translate() argument
748 struct msi_domain_info *info = domain->host_data; in msi_domain_translate()
754 if (!info->ops->msi_translate) in msi_domain_translate()
755 return -ENOTSUPP; in msi_domain_translate()
756 return info->ops->msi_translate(domain, fwspec, hwirq, type); in msi_domain_translate()
770 return arg->hwirq; in msi_domain_ops_get_hwirq()
773 static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev, in msi_domain_ops_prepare() argument
783 arg->desc = desc; in msi_domain_ops_set_desc()
786 static int msi_domain_ops_init(struct irq_domain *domain, in msi_domain_ops_init() argument
791 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, in msi_domain_ops_init()
792 info->chip_data); in msi_domain_ops_init()
793 if (info->handler && info->handler_name) { in msi_domain_ops_init()
794 __irq_set_handler(virq, info->handler, 0, info->handler_name); in msi_domain_ops_init()
795 if (info->handler_data) in msi_domain_ops_init()
796 irq_set_handler_data(virq, info->handler_data); in msi_domain_ops_init()
810 struct msi_domain_ops *ops = info->ops; in msi_domain_update_dom_ops()
813 info->ops = &msi_domain_ops_default; in msi_domain_update_dom_ops()
817 if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS)) in msi_domain_update_dom_ops()
820 if (ops->get_hwirq == NULL) in msi_domain_update_dom_ops()
821 ops->get_hwirq = msi_domain_ops_default.get_hwirq; in msi_domain_update_dom_ops()
822 if (ops->msi_init == NULL) in msi_domain_update_dom_ops()
823 ops->msi_init = msi_domain_ops_default.msi_init; in msi_domain_update_dom_ops()
824 if (ops->msi_prepare == NULL) in msi_domain_update_dom_ops()
825 ops->msi_prepare = msi_domain_ops_default.msi_prepare; in msi_domain_update_dom_ops()
826 if (ops->set_desc == NULL) in msi_domain_update_dom_ops()
827 ops->set_desc = msi_domain_ops_default.set_desc; in msi_domain_update_dom_ops()
832 struct irq_chip *chip = info->chip; in msi_domain_update_chip_ops()
834 BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask); in msi_domain_update_chip_ops()
835 if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY)) in msi_domain_update_chip_ops()
836 chip->irq_set_affinity = msi_domain_set_affinity; in msi_domain_update_chip_ops()
844 struct irq_domain *domain; in __msi_create_irq_domain() local
846 if (info->hwsize > MSI_XA_DOMAIN_SIZE) in __msi_create_irq_domain()
854 if (!info->hwsize) in __msi_create_irq_domain()
855 info->hwsize = MSI_XA_DOMAIN_SIZE; in __msi_create_irq_domain()
858 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) in __msi_create_irq_domain()
861 domain = irq_domain_create_hierarchy(parent, flags | IRQ_DOMAIN_FLAG_MSI, 0, in __msi_create_irq_domain()
864 if (domain) { in __msi_create_irq_domain()
865 irq_domain_update_bus_token(domain, info->bus_token); in __msi_create_irq_domain()
866 if (info->flags & MSI_FLAG_PARENT_PM_DEV) in __msi_create_irq_domain()
867 domain->pm_dev = parent->pm_dev; in __msi_create_irq_domain()
870 return domain; in __msi_create_irq_domain()
874 * msi_create_irq_domain - Create an MSI interrupt domain
876 * @info: MSI domain info
877 * @parent: Parent irq domain
889 * msi_parent_init_dev_msi_info - Delegate initialization of device MSI info down
890 * in the domain hierarchy
891 * @dev: The device for which the domain should be created
892 * @domain: The domain in the hierarchy this op is being called on
893 * @msi_parent_domain: The IRQ_DOMAIN_FLAG_MSI_PARENT domain for the child to
895 * @msi_child_info: The MSI domain info of the IRQ_DOMAIN_FLAG_MSI_DEVICE
896 * domain to be created
901 * underlying interrupt domain hierarchy:
903 * The device domain to be initialized requests the broadest feature set
904 * possible and the underlying domain hierarchy puts restrictions on it.
906 * That's trivial for a simple parent->child relationship, but it gets
907 * interesting with an intermediate domain: root->parent->child. The
909 * domain is providing. So that creates a classic chicken-and-egg problem:
912 * One solution is to let the root domain handle the initialization that's
913 * why there is the @domain and the @msi_parent_domain pointer.
915 bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain, in msi_parent_init_dev_msi_info() argument
919 struct irq_domain *parent = domain->parent; in msi_parent_init_dev_msi_info()
921 if (WARN_ON_ONCE(!parent || !parent->msi_parent_ops || in msi_parent_init_dev_msi_info()
922 !parent->msi_parent_ops->init_dev_msi_info)) in msi_parent_init_dev_msi_info()
925 return parent->msi_parent_ops->init_dev_msi_info(dev, parent, msi_parent_domain, in msi_parent_init_dev_msi_info()
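For an intermediate parent domain, the usual way out of the problem described above is to forward the call straight down with msi_parent_init_dev_msi_info() so the root applies its restrictions. A hedged sketch of such a delegating msi_parent_ops instance (the prefix and the supported feature mask are illustrative, not taken from a particular driver):

/* Hedged sketch: an intermediate MSI parent that only delegates. */
static const struct msi_parent_ops my_msi_parent_ops = {
	.supported_flags	= MSI_GENERIC_FLAGS_MASK,
	.prefix			= "MY-",
	.init_dev_msi_info	= msi_parent_init_dev_msi_info,
};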
930 * msi_create_device_irq_domain - Create a device MSI interrupt domain
932 * @domid: Domain id
933 * @template: MSI domain info bundle used as template
935 * @domain_data: Optional pointer to domain specific data which is set in
946 * The domain name and the irq chip name for a MSI device domain are
947 * composed as: "$(PREFIX)$(CHIPNAME)-$(DEVNAME)"
949 * $PREFIX: Optional prefix provided by the underlying MSI parent domain
958 * PCI-MSI-0000:00:1c.0 0-edge Parent domain has no prefix
959 * IR-PCI-MSI-0000:00:1c.4 0-edge Same with interrupt remapping prefix 'IR-'
961 * IR-PCI-MSIX-0000:3d:00.0 0-edge Hardware interrupt numbers reflect
962 * IR-PCI-MSIX-0000:3d:00.0 1-edge the real MSI-X index on that device
963 * IR-PCI-MSIX-0000:3d:00.0 2-edge
969 * The domain pointer is stored in @dev::msi::data::__irqdomains[]. All
970 * subsequent operations on the domain depend on the domain id.
972 * The domain is automatically freed when the device is removed via devres
981 struct irq_domain *domain, *parent = dev->msi.domain; in msi_create_device_irq_domain() local
996 bundle->info.hwsize = hwsize; in msi_create_device_irq_domain()
997 bundle->info.chip = &bundle->chip; in msi_create_device_irq_domain()
998 bundle->info.ops = &bundle->ops; in msi_create_device_irq_domain()
999 bundle->info.data = domain_data; in msi_create_device_irq_domain()
1000 bundle->info.chip_data = chip_data; in msi_create_device_irq_domain()
1002 pops = parent->msi_parent_ops; in msi_create_device_irq_domain()
1003 snprintf(bundle->name, sizeof(bundle->name), "%s%s-%s", in msi_create_device_irq_domain()
1004 pops->prefix ? : "", bundle->chip.name, dev_name(dev)); in msi_create_device_irq_domain()
1005 bundle->chip.name = bundle->name; in msi_create_device_irq_domain()
1009 * device domains so that the existing firmware results in a domain in msi_create_device_irq_domain()
1011 * All other device domains like PCI/MSI use the named firmware in msi_create_device_irq_domain()
1015 if (bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE) in msi_create_device_irq_domain()
1016 fwnode = dev->fwnode; in msi_create_device_irq_domain()
1018 fwnode = fwnalloced = irq_domain_alloc_named_fwnode(bundle->name); in msi_create_device_irq_domain()
1031 if (!pops->init_dev_msi_info(dev, parent, parent, &bundle->info)) in msi_create_device_irq_domain()
1034 domain = __msi_create_irq_domain(fwnode, &bundle->info, IRQ_DOMAIN_FLAG_MSI_DEVICE, parent); in msi_create_device_irq_domain()
1035 if (!domain) in msi_create_device_irq_domain()
1038 domain->dev = dev; in msi_create_device_irq_domain()
1039 dev->msi.data->__domains[domid].domain = domain; in msi_create_device_irq_domain()
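A hedged sketch of a caller-side instantiation of such a per-device domain from a template; the chip callbacks are hypothetical, the hardware size of 32 is arbitrary, and the call signature is the one declared in <linux/msi.h>:

/* Hedged sketch: create a per-device MSI domain with 32 hardware slots. */
static const struct msi_domain_template my_template = {
	.chip = {
		.name			= "MY-MSI",
		.irq_mask		= my_msi_mask,		/* hypothetical */
		.irq_unmask		= my_msi_unmask,	/* hypothetical */
		.irq_write_msi_msg	= my_msi_write_msg,	/* hypothetical */
	},
	.info = {
		.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	},
};

if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &my_template,
				  32, NULL, NULL))
	return -ENODEV;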
1053 * msi_remove_device_irq_domain - Free a device MSI interrupt domain
1055 * @domid: Domain id
1061 struct irq_domain *domain; in msi_remove_device_irq_domain() local
1065 domain = msi_get_device_domain(dev, domid); in msi_remove_device_irq_domain()
1067 if (!domain || !irq_domain_is_msi_device(domain)) in msi_remove_device_irq_domain()
1070 dev->msi.data->__domains[domid].domain = NULL; in msi_remove_device_irq_domain()
1071 info = domain->host_data; in msi_remove_device_irq_domain()
1072 if (irq_domain_is_msi_device(domain)) in msi_remove_device_irq_domain()
1073 fwnode = domain->fwnode; in msi_remove_device_irq_domain()
1074 irq_domain_remove(domain); in msi_remove_device_irq_domain()
1083 * msi_match_device_irq_domain - Match a device irq domain against a bus token
1085 * @domid: Domain id
1086 * @bus_token: Bus token to match against the domain bus token
1088 * Return: True if device domain exists and bus tokens match.
1094 struct irq_domain *domain; in msi_match_device_irq_domain() local
1098 domain = msi_get_device_domain(dev, domid); in msi_match_device_irq_domain()
1099 if (domain && irq_domain_is_msi_device(domain)) { in msi_match_device_irq_domain()
1100 info = domain->host_data; in msi_match_device_irq_domain()
1101 ret = info->bus_token == bus_token; in msi_match_device_irq_domain()
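A hedged one-liner showing the intended use: ask whether the default per-device domain carries a given bus token (DOMAIN_BUS_PCI_DEVICE_MSIX is one of the enum irq_domain_bus_token values used by the PCI core):

/* Hedged sketch: check for an existing per-device MSI-X domain. */
bool has_msix = msi_match_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
					    DOMAIN_BUS_PCI_DEVICE_MSIX);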
1107 static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, in msi_domain_prepare_irqs() argument
1110 struct msi_domain_info *info = domain->host_data; in msi_domain_prepare_irqs()
1111 struct msi_domain_ops *ops = info->ops; in msi_domain_prepare_irqs()
1113 return ops->msi_prepare(domain, dev, nvec, arg); in msi_domain_prepare_irqs()
1119 * dummy vector to the device. If the PCI/MSI device does not support
1125 * used. For now reservation mode is restricted to PCI/MSI.
1127 static bool msi_check_reservation_mode(struct irq_domain *domain, in msi_check_reservation_mode() argument
1133 switch(domain->bus_token) { in msi_check_reservation_mode()
1143 if (!(info->flags & MSI_FLAG_MUST_REACTIVATE)) in msi_check_reservation_mode()
1154 return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask; in msi_check_reservation_mode()
1157 static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc, in msi_handle_pci_fail() argument
1160 switch(domain->bus_token) { in msi_handle_pci_fail()
1169 return -ENOSPC; in msi_handle_pci_fail()
1172 /* Let a failed PCI multi MSI allocation retry */ in msi_handle_pci_fail()
1173 if (desc->nvec_used > 1) in msi_handle_pci_fail()
1177 return allocated ? allocated : -ENOSPC; in msi_handle_pci_fail()
1183 static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags) in msi_init_virq() argument
1185 struct irq_data *irqd = irq_domain_get_irq_data(domain, virq); in msi_init_virq()
1196 * different way by using a catch-all vector. in msi_init_virq()
1222 static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain, in __msi_domain_alloc_irqs() argument
1225 struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store; in __msi_domain_alloc_irqs()
1226 struct msi_domain_info *info = domain->host_data; in __msi_domain_alloc_irqs()
1227 struct msi_domain_ops *ops = info->ops; in __msi_domain_alloc_irqs()
1234 ret = msi_domain_prepare_irqs(domain, dev, ctrl->nirqs, &arg); in __msi_domain_alloc_irqs()
1239 * This flag is set by the PCI layer as we need to activate in __msi_domain_alloc_irqs()
1240 * the MSI entries before the PCI layer enables MSI in the in __msi_domain_alloc_irqs()
1243 if (info->flags & MSI_FLAG_ACTIVATE_EARLY) in __msi_domain_alloc_irqs()
1250 if (msi_check_reservation_mode(domain, info, dev)) in __msi_domain_alloc_irqs()
1253 xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) { in __msi_domain_alloc_irqs()
1257 /* This should return -ECONFUSED... */ in __msi_domain_alloc_irqs()
1258 if (WARN_ON_ONCE(allocated >= ctrl->nirqs)) in __msi_domain_alloc_irqs()
1259 return -EINVAL; in __msi_domain_alloc_irqs()
1261 if (ops->prepare_desc) in __msi_domain_alloc_irqs()
1262 ops->prepare_desc(domain, &arg, desc); in __msi_domain_alloc_irqs()
1264 ops->set_desc(&arg, desc); in __msi_domain_alloc_irqs()
1266 virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used, in __msi_domain_alloc_irqs()
1268 desc->affinity); in __msi_domain_alloc_irqs()
1270 return msi_handle_pci_fail(domain, desc, allocated); in __msi_domain_alloc_irqs()
1272 for (i = 0; i < desc->nvec_used; i++) { in __msi_domain_alloc_irqs()
1275 ret = msi_init_virq(domain, virq + i, vflags); in __msi_domain_alloc_irqs()
1279 if (info->flags & MSI_FLAG_DEV_SYSFS) { in __msi_domain_alloc_irqs()
1293 if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS)) in msi_domain_alloc_simple_msi_descs()
1303 struct irq_domain *domain; in __msi_domain_alloc_locked() local
1307 return -EINVAL; in __msi_domain_alloc_locked()
1309 domain = msi_get_device_domain(dev, ctrl->domid); in __msi_domain_alloc_locked()
1310 if (!domain) in __msi_domain_alloc_locked()
1311 return -ENODEV; in __msi_domain_alloc_locked()
1313 info = domain->host_data; in __msi_domain_alloc_locked()
1319 ops = info->ops; in __msi_domain_alloc_locked()
1320 if (ops->domain_alloc_irqs) in __msi_domain_alloc_locked()
1321 return ops->domain_alloc_irqs(domain, dev, ctrl->nirqs); in __msi_domain_alloc_locked()
1323 return __msi_domain_alloc_irqs(dev, domain, ctrl); in __msi_domain_alloc_locked()
1336 * msi_domain_alloc_irqs_range_locked - Allocate interrupts from a MSI interrupt domain
1339 * @domid: Id of the interrupt domain to operate on
1356 .nirqs = last + 1 - first, in msi_domain_alloc_irqs_range_locked()
1363 * msi_domain_alloc_irqs_range - Allocate interrupts from a MSI interrupt domain
1366 * @domid: Id of the interrupt domain to operate on
1385 * msi_domain_alloc_irqs_all_locked - Allocate all interrupts from a MSI interrupt domain
1389 * @domid: Id of the interrupt domain to operate on
1392 * This function scans all MSI descriptors of the MSI domain and allocates interrupts
1393 * for all unassigned ones. This function is intended for MSI domain usage where
1394 * the descriptor allocation is handled at the call site, e.g. PCI/MSI[X].
1403 .last = msi_domain_get_hwsize(dev, domid) - 1, in msi_domain_alloc_irqs_all_locked()
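The locked and unlocked range/all variants above differ only in whether the caller already holds the descriptor mutex. A hedged sketch of the unlocked range allocator paired with its release counterpart (nvec is a hypothetical vector count):

/* Hedged sketch: allocate interrupts for MSI indices 0..nvec-1 ... */
int ret = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);

if (ret)
	return ret;
/* ... and release the same range on teardown. */
msi_domain_free_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);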
1416 struct irq_domain *domain; in __msi_domain_alloc_irq_at() local
1421 domain = msi_get_device_domain(dev, domid); in __msi_domain_alloc_irq_at()
1422 if (!domain) { in __msi_domain_alloc_irq_at()
1423 map.index = -ENODEV; in __msi_domain_alloc_irq_at()
1429 map.index = -ENOMEM; in __msi_domain_alloc_irq_at()
1434 desc->data.icookie = *icookie; in __msi_domain_alloc_irq_at()
1442 ctrl.first = ctrl.last = desc->msi_index; in __msi_domain_alloc_irq_at()
1444 ret = __msi_domain_alloc_irqs(dev, domain, &ctrl); in __msi_domain_alloc_irq_at()
1449 map.index = desc->msi_index; in __msi_domain_alloc_irq_at()
1450 map.virq = desc->irq; in __msi_domain_alloc_irq_at()
1456 * msi_domain_alloc_irq_at - Allocate an interrupt from a MSI interrupt domain at
1457 * a given index - or at the next free index
1461 * @domid: Id of the interrupt domain to operate on
1465 * @icookie: Optional pointer to a domain specific per instance cookie. If
1466 * non-NULL the content of the cookie is stored in msi_desc::data.
1467 * Must be NULL for MSI-X allocations
1469 * This requires a MSI interrupt domain which lets the core code manage the
1493 * msi_device_domain_alloc_wired - Allocate a "wired" interrupt on @domain
1494 * @domain: The domain to allocate on
1501 * irq_create_fwspec_mapping(). As the wire to MSI domain is sparse, but
1508 * msi_desc::cookie so the underlying interrupt chip and domain code can
1513 int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, in msi_device_domain_alloc_wired() argument
1518 struct device *dev = domain->dev; in msi_device_domain_alloc_wired()
1521 if (WARN_ON_ONCE(!dev || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI)) in msi_device_domain_alloc_wired()
1522 return -EINVAL; in msi_device_domain_alloc_wired()
1527 if (WARN_ON_ONCE(msi_get_device_domain(dev, domid) != domain)) in msi_device_domain_alloc_wired()
1528 map.index = -EINVAL; in msi_device_domain_alloc_wired()
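msi_domain_alloc_irq_at(), documented further above, returns a struct msi_map carrying either the allocated index and Linux interrupt number or a negative error code in the index field. A hedged sketch of a post-enable dynamic allocation at the next free index (MSI_ANY_INDEX and struct msi_map come from the MSI headers):

/* Hedged sketch: allocate one more vector at any free index. */
struct msi_map map = msi_domain_alloc_irq_at(dev, MSI_DEFAULT_DOMAIN,
					     MSI_ANY_INDEX, NULL, NULL);

if (map.index < 0)
	return map.index;	/* Negative error code, nothing allocated */
dev_dbg(dev, "index %d mapped to Linux irq %d\n", map.index, map.virq);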
1536 static void __msi_domain_free_irqs(struct device *dev, struct irq_domain *domain, in __msi_domain_free_irqs() argument
1539 struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store; in __msi_domain_free_irqs()
1540 struct msi_domain_info *info = domain->host_data; in __msi_domain_free_irqs()
1546 xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) { in __msi_domain_free_irqs()
1552 for (i = 0; i < desc->nvec_used; i++) { in __msi_domain_free_irqs()
1553 irqd = irq_domain_get_irq_data(domain, desc->irq + i); in __msi_domain_free_irqs()
1558 irq_domain_free_irqs(desc->irq, desc->nvec_used); in __msi_domain_free_irqs()
1559 if (info->flags & MSI_FLAG_DEV_SYSFS) in __msi_domain_free_irqs()
1561 desc->irq = 0; in __msi_domain_free_irqs()
1569 struct irq_domain *domain; in msi_domain_free_locked() local
1574 domain = msi_get_device_domain(dev, ctrl->domid); in msi_domain_free_locked()
1575 if (!domain) in msi_domain_free_locked()
1578 info = domain->host_data; in msi_domain_free_locked()
1579 ops = info->ops; in msi_domain_free_locked()
1581 if (ops->domain_free_irqs) in msi_domain_free_locked()
1582 ops->domain_free_irqs(domain, dev); in msi_domain_free_locked()
1584 __msi_domain_free_irqs(dev, domain, ctrl); in msi_domain_free_locked()
1586 if (ops->msi_post_free) in msi_domain_free_locked()
1587 ops->msi_post_free(domain, dev); in msi_domain_free_locked()
1589 if (info->flags & MSI_FLAG_FREE_MSI_DESCS) in msi_domain_free_locked()
1594 * msi_domain_free_irqs_range_locked - Free a range of interrupts from a MSI interrupt domain
1598 * @domid: Id of the interrupt domain to operate on
1614 * msi_domain_free_irqs_range - Free a range of interrupts from a MSI interrupt domain
1618 * @domid: Id of the interrupt domain to operate on
1632 * msi_domain_free_irqs_all_locked - Free all interrupts from a MSI interrupt domain
1636 * @domid: The id of the domain to operate on
1645 msi_domain_get_hwsize(dev, domid) - 1); in msi_domain_free_irqs_all_locked()
1649 * msi_domain_free_irqs_all - Free all interrupts from a MSI interrupt domain
1653 * @domid: The id of the domain to operate on
1663 * msi_device_domain_free_wired - Free a wired interrupt in @domain
1664 * @domain: The domain to free the interrupt on
1670 void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq) in msi_device_domain_free_wired() argument
1673 struct device *dev = domain->dev; in msi_device_domain_free_wired()
1675 if (WARN_ON_ONCE(!dev || !desc || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI)) in msi_device_domain_free_wired()
1679 if (!WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain)) { in msi_device_domain_free_wired()
1680 msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index, in msi_device_domain_free_wired()
1681 desc->msi_index); in msi_device_domain_free_wired()
1687 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
1688 * @domain: The interrupt domain to retrieve data from
1690 * Return: the pointer to the msi_domain_info stored in @domain->host_data.
1692 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain) in msi_get_domain_info() argument
1694 return (struct msi_domain_info *)domain->host_data; in msi_get_domain_info()
1698 * msi_device_has_isolated_msi - True if the device has isolated MSI
1708 * by abusing a normal PCI MemWr DMA) must not allow the VFIO userspace to
1709 * impact outside its security domain, eg userspace triggering interrupts on
1715 struct irq_domain *domain = dev_get_msi_domain(dev); in msi_device_has_isolated_msi() local
1717 for (; domain; domain = domain->parent) in msi_device_has_isolated_msi()
1718 if (domain->flags & IRQ_DOMAIN_FLAG_ISOLATED_MSI) in msi_device_has_isolated_msi()
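Callers in the VFIO/iommufd style consult this before handing a device to userspace; a hedged sketch (allow_unsafe_interrupts stands in for whatever opt-in the caller provides):

/* Hedged sketch: refuse unsafe assignment when MSI isolation is absent. */
if (!msi_device_has_isolated_msi(dev) && !allow_unsafe_interrupts)
	return -EPERM;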