Lines matching "msi-x" in drivers/pci/msi/msi.c (matched lines only, grouped by function)

// SPDX-License-Identifier: GPL-2.0
 * PCI Message Signaled Interrupt (MSI)
 * Copyright (C) 2003-2004 Intel

#include "msi.h"

 * pci_msi_supported - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * to determine if MSI/-X are supported for the device. If MSI/-X is

In pci_msi_supported():
	/* MSI must be globally enabled and supported by the device */
	if (!dev || dev->no_msi)
	 * Any bridge which does NOT route MSI transactions from its
	 * - arch-specific PCI host bus controller drivers (deprecated)
	 * - quirks for specific PCI bridges
	 * or indirectly by platform-specific PCI host bridge drivers by
	 * the NO_MSI flag when no MSI domain is found for this bridge
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
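The bus walk above is how MSI gets vetoed for every device behind a bridge that cannot route MSI writes upstream. As a hedged sketch (the quirk name and the 0x1234/0x5678 IDs are invented, not taken from this file), a bridge quirk can set PCI_BUS_FLAGS_NO_MSI on its secondary bus so that pci_msi_supported() fails for all downstream devices:

#include <linux/pci.h>

/* Hypothetical bridge quirk; real ones live in drivers/pci/quirks.c */
static void quirk_no_msi_behind_bridge(struct pci_dev *bridge)
{
	if (bridge->subordinate) {
		bridge->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
		pci_info(bridge, "MSI disabled for devices behind this bridge\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_no_msi_behind_bridge);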
In pcim_msi_release():
	dev->is_msi_managed = false;

 * vs. msi_device_data_release() in the MSI core code.

In pcim_setup_msi_release():
	if (!pci_is_managed(dev) || dev->is_msi_managed)
	ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
	dev->is_msi_managed = true;

 * Ordering vs. devres: msi device data has to be installed first so that

In pci_setup_msi_context():
	int ret = msi_setup_device_data(&dev->dev);

 * Helper functions for mask/unmask and MSI message handling

In pci_msi_update_mask():
	raw_spinlock_t *lock = &dev->msi_lock;
	if (!desc->pci.msi_attrib.can_mask)
	desc->pci.msi_mask &= ~clear;
	desc->pci.msi_mask |= set;
	pci_write_config_dword(dev, desc->pci.mask_pos, desc->pci.msi_mask);

 * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts

In pci_msi_mask_irq():
	__pci_msi_mask_desc(desc, BIT(data->irq - desc->irq));

 * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts

In pci_msi_unmask_irq():
	__pci_msi_unmask_desc(desc, BIT(data->irq - desc->irq));
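pci_msi_mask_irq() and pci_msi_unmask_irq() are exported as generic irq_chip callbacks; the BIT(data->irq - desc->irq) offset picks the per-vector mask bit because one multi-MSI descriptor covers several consecutive Linux IRQs. A hedged sketch of how a PCI host controller MSI driver typically wires them up (the "foo" chip is invented):

#include <linux/irq.h>
#include <linux/msi.h>

/* Hypothetical MSI irq_chip reusing the generic PCI/MSI mask callbacks */
static struct irq_chip foo_msi_irq_chip = {
	.name		= "foo-msi",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};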
In __pci_read_msi_msg():
	BUG_ON(dev->current_state != PCI_D0);
	if (entry->pci.msi_attrib.is_msix) {
		if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual))
		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
	int pos = dev->msi_cap;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &msg->address_lo);
	if (entry->pci.msi_attrib.is_64) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &msg->address_hi);
	msg->address_hi = 0;
	msg->data = data;

In pci_write_msg_msi():
	int pos = dev->msi_cap;
	msgctl |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, desc->pci.msi_attrib.multiple);
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, msg->address_lo);
	if (desc->pci.msi_attrib.is_64) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, msg->address_hi);
		pci_write_config_word(dev, pos + PCI_MSI_DATA_64, msg->data);
	pci_write_config_word(dev, pos + PCI_MSI_DATA_32, msg->data);

In pci_write_msg_msix():
	u32 ctrl = desc->pci.msix_ctrl;
	if (desc->pci.msi_attrib.is_virtual)
	writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
	writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
	writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
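The readl()/writel() offsets above address a single 16-byte MSI-X table entry. A hedged sketch of the per-entry address arithmetic (the helper name is invented; the MSI code uses an equivalent internal helper keyed off desc->pci.mask_base):

#include <linux/io.h>
#include <linux/pci_regs.h>

/*
 * Each MSI-X table entry is PCI_MSIX_ENTRY_SIZE (16) bytes:
 * +0 address_lo, +4 address_hi, +8 data, +12 vector control.
 */
static void __iomem *example_msix_entry_addr(void __iomem *table_base,
					     unsigned int index)
{
	return table_base + index * PCI_MSIX_ENTRY_SIZE;
}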
In __pci_write_msi_msg():
	if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
	} else if (entry->pci.msi_attrib.is_msix) {
	entry->msg = *msg;
	if (entry->write_msi_msg)
		entry->write_msi_msg(entry, entry->write_msi_msg_data);

/* PCI/MSI specific functionality */

In pci_intx_for_msi():
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))

In pci_msi_set_enable():
	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);

In msi_setup_msi_desc():
	/* MSI Entry Initialization */
	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
	desc.pci.msi_attrib.default_irq = dev->irq;
	desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
	desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
	return msi_insert_msi_desc(&dev->dev, &desc);

In msi_verify_entries():
	if (!dev->no_64bit_msi)
	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
		if (entry->msg.address_hi) {
			pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
				entry->msg.address_hi, entry->msg.address_lo);
	return !entry ? 0 : -EIO;

In __msi_capability_init():
	entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
	 * Copy the MSI descriptor for the error path because
	/* Configure MSI capability structure */
	/* Set MSI enabled bits */
	dev->msi_enabled = 1;
	dev->irq = entry->irq;

 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * Setup the MSI capability structure of the device with the requested
 * setup of an entry with the new MSI IRQ. A negative return value indicates

In msi_capability_init():
	/* Reject multi-MSI early on irq domain enabled architectures */
	 * Disable MSI during setup in the hardware, but mark it enabled
	guard(msi_descs_lock)(&dev->dev);

In __pci_enable_msi_range():
	if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
		return -EINVAL;
	/* Check whether driver already requested MSI-X IRQs */
	if (dev->msix_enabled) {
		pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
		return -EINVAL;
	return -ERANGE;
	if (WARN_ON_ONCE(dev->msi_enabled))
		return -EINVAL;
	/* Test for the availability of MSI support */
	return -ENOTSUPP;
	return -ENOSPC;
	return -ENODEV;
	return -ENOSPC;
	return -ENOSPC;
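The error codes above (-EINVAL, -ERANGE, -ENOSPC, ...) surface through driver-facing wrappers such as pci_alloc_irq_vectors(), and __pci_enable_msi_range() retries with fewer vectors down to the caller's minimum before giving up. A hedged driver-side sketch (all "foo" names are invented):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_setup_irqs(struct pci_dev *pdev)
{
	int nvec;

	/* Ask for up to 8 MSI vectors, accept as few as 1 */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;	/* e.g. -EINVAL or -ENOSPC from the paths above */

	/* pci_irq_vector() maps vector 0 to its Linux IRQ number */
	return request_irq(pci_irq_vector(pdev, 0), foo_irq_handler, 0,
			   "foo", pdev);
}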
 * pci_msi_vec_count - Return the number of MSI vectors a device can send
 * This function returns the number of MSI vectors a device requested via
 * device is not capable of sending MSI interrupts. Otherwise, the call succeeds
 * MSI specification.

In pci_msi_vec_count():
	if (!dev->msi_cap)
		return -EINVAL;
	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
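pci_msi_vec_count() turns the Multiple Message Capable field read above into a plain vector count (a power of two, at most 32), so a driver can clamp its request before allocating. A hedged sketch (the function name and nr_queues are invented):

#include <linux/pci.h>

static int foo_msi_vectors(struct pci_dev *pdev, unsigned int nr_queues)
{
	int max_vecs = pci_msi_vec_count(pdev);

	if (max_vecs < 0)
		return max_vecs;	/* no MSI capability */

	return pci_alloc_irq_vectors(pdev, 1,
				     min_t(unsigned int, nr_queues, max_vecs),
				     PCI_IRQ_MSI);
}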
 * Architecture override returns true when the PCI MSI message should be

In __pci_restore_msi_state():
	if (!dev->msi_enabled)
	entry = irq_get_msi_desc(dev->irq);
	__pci_write_msi_msg(entry, &entry->msg);
	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
		   FIELD_PREP(PCI_MSI_FLAGS_QSIZE, entry->pci.msi_attrib.multiple);
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);

In pci_msi_shutdown():
	if (!pci_msi_enable || !dev || !dev->msi_enabled)
	dev->msi_enabled = 0;
	/* Return the device with MSI unmasked as its initial state */
	desc = msi_first_desc(&dev->dev, MSI_DESC_ALL);
	/* Restore dev->irq to its default pin-assertion IRQ */
	dev->irq = desc->pci.msi_attrib.default_irq;

/* PCI/MSI-X specific functionality */

In pci_msix_clear_and_set_ctrl():
	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);

In msix_map_region():
	pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,

 * msix_prepare_msi_desc - Prepare a half initialized MSI descriptor for operation
 * @desc: The MSI descriptor for preparation
 * allocations for MSI-X after initial enablement.
 * Ideally the whole MSI-X setup would work that way, but there is no way to

In msix_prepare_msi_desc():
	desc->nvec_used = 1;
	desc->pci.msi_attrib.is_msix = 1;
	desc->pci.msi_attrib.is_64 = 1;
	desc->pci.msi_attrib.default_irq = dev->irq;
	desc->pci.mask_base = dev->msix_base;
	    !desc->pci.msi_attrib.is_virtual) {
		desc->pci.msi_attrib.can_mask = 1;
		if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST)
		desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
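The "dynamic allocations for MSI-X after initial enablement" that msix_prepare_msi_desc() serves are reached from drivers through pci_msix_can_alloc_dyn(), pci_msix_alloc_irq_at() and pci_msix_free_irq(). A hedged sketch, assuming MSI-X is already enabled on the device:

#include <linux/msi_api.h>
#include <linux/pci.h>

/* Allocate one extra MSI-X vector at any free table slot, then release it */
static int foo_add_dyn_vector(struct pci_dev *pdev)
{
	struct msi_map map;

	if (!pci_msix_can_alloc_dyn(pdev))
		return -EOPNOTSUPP;

	map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
	if (map.index < 0)
		return map.index;

	/* map.index is the MSI-X table slot, map.virq the Linux IRQ number */
	pci_msix_free_irq(pdev, map);
	return 0;
}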
In msix_setup_msi_descs():
	ret = msi_insert_msi_desc(&dev->dev, &desc);

In msix_update_entries():
	msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
		entries->vector = desc->irq;

In __msix_setup_interrupts():
	/* Check if all MSI entries honor device restrictions */

In msix_setup_interrupts():
	guard(msi_descs_lock)(&dev->dev);

 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X IRQ. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated IRQs, non-zero otherwise.

In msix_capability_init():
	 * Some devices require MSI-X to be enabled before the MSI-X
	dev->msix_enabled = 1;
	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	/* Request & Map MSI-X table region */
	dev->msix_base = msix_map_region(dev, tsize);
	if (!dev->msix_base) {
		ret = -ENOMEM;
	 * which takes the MSI-X mask bits into account even
	 * when MSI-X is disabled, which prevents MSI delivery.
	msix_mask_all(dev->msix_base, tsize);
	dev->msix_enabled = 0;

In __pci_enable_msix_range():
	return -ERANGE;
	if (dev->msi_enabled) {
		pci_info(dev, "can't enable MSI-X (MSI already enabled)\n");
		return -EINVAL;
	if (WARN_ON_ONCE(dev->msix_enabled))
		return -EINVAL;
	/* Check MSI-X early on irq domain enabled architectures */
	return -ENOTSUPP;
	if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0)
		return -EINVAL;
	return -EINVAL;
	return -ENOSPC;
	return -ENODEV;
	return -ENOSPC;
	return -ENOSPC;
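The entries[] handling in msix_update_entries() above fills struct msix_entry::vector for callers of the older pci_enable_msix_range() interface; the error codes just above come back through the same path. A hedged sketch of that legacy interface (new code usually prefers pci_alloc_irq_vectors() with PCI_IRQ_MSIX):

#include <linux/pci.h>

static int foo_enable_msix(struct pci_dev *pdev)
{
	struct msix_entry entries[4];
	int i, nvec;

	for (i = 0; i < 4; i++)
		entries[i].entry = i;	/* desired MSI-X table slots */

	/* Accept anywhere between 2 and 4 vectors */
	nvec = pci_enable_msix_range(pdev, entries, 2, 4);
	if (nvec < 0)
		return nvec;

	/* entries[i].vector now holds the Linux IRQ for each allocated slot */
	return nvec;
}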
In __pci_restore_msix_state():
	if (!dev->msix_enabled)
	scoped_guard (msi_descs_lock, &dev->dev) {
		msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
			__pci_write_msi_msg(entry, &entry->msg);
			pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);

In pci_msix_shutdown():
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
	dev->msix_enabled = 0;
	/* Return the device with MSI-X masked as its initial state */
	msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
	dev->msix_enabled = 0;

In pci_free_msi_irqs():
	if (dev->msix_base) {
		iounmap(dev->msix_base);
		dev->msix_base = NULL;
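pci_free_msi_irqs() is the internal half of teardown; from a driver the usual order is free_irq() for every requested vector, then pci_free_irq_vectors(), which disables MSI/MSI-X and ends up in the shutdown and free paths shown above. A hedged sketch:

#include <linux/interrupt.h>
#include <linux/pci.h>

static void foo_teardown_irqs(struct pci_dev *pdev, int nvec)
{
	int i;

	for (i = 0; i < nvec; i++)
		free_irq(pci_irq_vector(pdev, i), pdev);

	pci_free_irq_vectors(pdev);
}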
 * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
 * @index: The MSI-X index to update

In pci_msix_write_tph_tag():
	if (!pdev->msix_enabled)
		return -ENXIO;
	virq = msi_get_virq(&pdev->dev, index);
		return -ENXIO;
	guard(msi_descs_lock)(&pdev->dev);
		return -ENXIO;
	guard(raw_spinlock_irq)(&irq_desc->lock);
	msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
	if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
		return -ENXIO;
	msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
	msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
	pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
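pci_msix_write_tph_tag() stores a TPH (TLP Processing Hints) steering tag in the ST field of one vector's control word. A hedged sketch of a call site (the vector index and tag value are made up, and the u16 tag parameter is assumed from the FIELD_PREP() above):

/* Assumed prototype: int pci_msix_write_tph_tag(struct pci_dev *, unsigned int, u16) */
static int foo_set_steering_tag(struct pci_dev *pdev)
{
	/* Fails with -ENXIO if MSI-X is disabled or the vector is virtual */
	return pci_msix_write_tph_tag(pdev, 3, 0x42);
}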
In msi_desc_to_pci_dev():
	return to_pci_dev(desc->dev);