Lines Matching full:iommu
21 #include <linux/iommu-helper.h>
23 #include <linux/amd-iommu.h>
37 #include <asm/iommu.h>
43 #include "../dma-iommu.h"
45 #include "../iommu-pages.h"
 67  * general struct to manage commands sent to an IOMMU
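The comment above introduces the command container used by the queuing hits further down; a minimal sketch of such a container (the field name is an assumption; the size follows the AMD I/O virtualization spec's 128-bit command format):

/* Sketch only: one 128-bit IOMMU command, carried as four 32-bit words. */
struct iommu_cmd {
	u32 data[4];
};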
77 static void set_dte_entry(struct amd_iommu *iommu,
138 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu) in get_dev_table() argument
141 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_dev_table()
167 /* Writes the specific IOMMU for a device into the PCI segment rlookup table */
168 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid) in amd_iommu_set_rlookup_table() argument
170 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in amd_iommu_set_rlookup_table()
172 pci_seg->rlookup_table[devid] = iommu; in amd_iommu_set_rlookup_table()
196 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid) in alloc_dev_data() argument
199 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in alloc_dev_data()
213 static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid) in search_dev_data() argument
217 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in search_dev_data()
233 struct amd_iommu *iommu; in clone_alias() local
240 iommu = rlookup_amd_iommu(&pdev->dev); in clone_alias()
241 if (!iommu) in clone_alias()
244 amd_iommu_set_rlookup_table(iommu, alias); in clone_alias()
245 dev_table = get_dev_table(iommu); in clone_alias()
253 static void clone_aliases(struct amd_iommu *iommu, struct device *dev) in clone_aliases() argument
266 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL); in clone_aliases()
271 static void setup_aliases(struct amd_iommu *iommu, struct device *dev) in setup_aliases() argument
274 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in setup_aliases()
290 clone_aliases(iommu, dev); in setup_aliases()
293 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid) in find_dev_data() argument
297 dev_data = search_dev_data(iommu, devid); in find_dev_data()
300 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
304 if (translation_pre_enabled(iommu)) in find_dev_data()
 312  * Find or create an IOMMU group for an acpihid device.
482 struct amd_iommu *iommu; in check_device() local
493 iommu = rlookup_amd_iommu(dev); in check_device()
494 if (!iommu) in check_device()
498 pci_seg = iommu->pci_seg; in check_device()
505 static int iommu_init_device(struct amd_iommu *iommu, struct device *dev) in iommu_init_device() argument
518 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
523 setup_aliases(iommu, dev); in iommu_init_device()
541 static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev) in iommu_ignore_device() argument
543 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in iommu_ignore_device()
544 struct dev_table_entry *dev_table = get_dev_table(iommu); in iommu_ignore_device()
555 setup_aliases(iommu, dev); in iommu_ignore_device()
581 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid) in dump_dte_entry() argument
584 struct dev_table_entry *dev_table = get_dev_table(iommu); in dump_dte_entry()
599 static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event) in amd_iommu_report_rmp_hw_error() argument
611 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_hw_error()
623 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_hw_error()
631 static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event) in amd_iommu_report_rmp_fault() argument
644 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_fault()
656 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_fault()
670 static void amd_iommu_report_page_fault(struct amd_iommu *iommu, in amd_iommu_report_page_fault() argument
677 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_page_fault()
693 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), in amd_iommu_report_page_fault()
712 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_page_fault()
721 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
723 struct device *dev = iommu->iommu.dev; in iommu_print_event()
749 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags); in iommu_print_event()
756 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
758 dump_dte_entry(iommu, devid); in iommu_print_event()
763 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
768 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
781 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
786 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
790 amd_iommu_report_rmp_fault(iommu, event); in iommu_print_event()
793 amd_iommu_report_rmp_hw_error(iommu, event); in iommu_print_event()
799 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
817 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
821 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
822 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
825 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
829 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
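The iommu_poll_events() hits above (817-829) show the head/tail log-polling pattern that the GA-log code below repeats; a condensed sketch of the full loop (simplified; the EXAMPLE_* entry-size and buffer-size constants are stand-ins, not the driver's exact macro names):

static void example_poll_event_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		/* Advance by one entry and wrap at the end of the ring. */
		head = (head + EXAMPLE_EVT_ENTRY_SIZE) % EXAMPLE_EVT_BUFFER_SIZE;
	}

	/* Publish the new head so the hardware knows the entries were consumed. */
	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}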
845 static void iommu_poll_ga_log(struct amd_iommu *iommu) in iommu_poll_ga_log() argument
849 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
852 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
853 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
859 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
866 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
888 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) in amd_iommu_set_pci_msi_domain() argument
894 dev_set_msi_domain(dev, iommu->ir_domain); in amd_iommu_set_pci_msi_domain()
899 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } in amd_iommu_set_pci_msi_domain() argument
907 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_handle_irq() local
908 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
913 writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
916 pr_devel("Processing IOMMU (ivhd%d) %s Log\n", in amd_iommu_handle_irq()
917 iommu->index, evt_type); in amd_iommu_handle_irq()
918 int_handler(iommu); in amd_iommu_handle_irq()
922 overflow_handler(iommu); in amd_iommu_handle_irq()
932 * Workaround: The IOMMU driver should read back the in amd_iommu_handle_irq()
937 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
986 * IOMMU command queuing functions
990 static int wait_on_sem(struct amd_iommu *iommu, u64 data) in wait_on_sem() argument
994 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
1007 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
1014 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
1015 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
1019 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
1021 /* Tell the IOMMU about it */ in copy_cmd_to_buffer()
1022 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
1026 struct amd_iommu *iommu, in build_completion_wait() argument
1029 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
1163 static int __iommu_queue_command_sync(struct amd_iommu *iommu, in __iommu_queue_command_sync() argument
1170 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1172 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1186 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1192 copy_cmd_to_buffer(iommu, cmd); in __iommu_queue_command_sync()
1195 iommu->need_sync = sync; in __iommu_queue_command_sync()
1200 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
1207 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1208 ret = __iommu_queue_command_sync(iommu, cmd, sync); in iommu_queue_command_sync()
1209 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1214 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
1216 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
1221 * buffer of an IOMMU
1223 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
1230 if (!iommu->need_sync) in iommu_completion_wait()
1233 data = atomic64_add_return(1, &iommu->cmd_sem_val); in iommu_completion_wait()
1234 build_completion_wait(&cmd, iommu, data); in iommu_completion_wait()
1236 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1238 ret = __iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
1242 ret = wait_on_sem(iommu, data); in iommu_completion_wait()
1245 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
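Taken together, the hits from wait_on_sem() (990) through iommu_completion_wait() (1245) outline the queue-and-sync path; a condensed sketch of how those pieces chain for one synchronous command (simplified, mirroring iommu_queue_command() followed by iommu_completion_wait(), with the error reporting left out):

static int example_queue_and_sync(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	struct iommu_cmd wait_cmd;
	unsigned long flags;
	u64 data;
	int ret;

	/* Queue the command under the per-IOMMU lock; this bumps the ring
	 * tail and writes it to MMIO_CMD_TAIL_OFFSET (copy_cmd_to_buffer). */
	raw_spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command_sync(iommu, cmd, true);
	raw_spin_unlock_irqrestore(&iommu->lock, flags);
	if (ret)
		return ret;

	/* Build a COMPLETION_WAIT that stores a fresh semaphore value... */
	data = atomic64_add_return(1, &iommu->cmd_sem_val);
	build_completion_wait(&wait_cmd, iommu, data);

	/* ...queue it, then spin in wait_on_sem() until the IOMMU writes it. */
	raw_spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command_sync(iommu, &wait_cmd, false);
	if (!ret)
		ret = wait_on_sem(iommu, data);
	raw_spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}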
1259 * Devices of this domain are behind this IOMMU in domain_flush_complete()
1266 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1272 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
1275 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) in amd_iommu_flush_dte_all() argument
1278 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_dte_all()
1281 iommu_flush_dte(iommu, devid); in amd_iommu_flush_dte_all()
1283 iommu_completion_wait(iommu); in amd_iommu_flush_dte_all()
1290 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) in amd_iommu_flush_tlb_all() argument
1293 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_tlb_all()
1299 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_all()
1302 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_all()
1305 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) in amd_iommu_flush_tlb_domid() argument
1311 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_domid()
1313 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_domid()
1316 static void amd_iommu_flush_all(struct amd_iommu *iommu) in amd_iommu_flush_all() argument
1322 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_all()
1323 iommu_completion_wait(iommu); in amd_iommu_flush_all()
1326 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1332 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1335 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) in amd_iommu_flush_irt_all() argument
1338 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_irt_all()
1340 if (iommu->irtcachedis_enabled) in amd_iommu_flush_irt_all()
1344 iommu_flush_irt(iommu, devid); in amd_iommu_flush_irt_all()
1346 iommu_completion_wait(iommu); in amd_iommu_flush_irt_all()
1349 void amd_iommu_flush_all_caches(struct amd_iommu *iommu) in amd_iommu_flush_all_caches() argument
1352 amd_iommu_flush_all(iommu); in amd_iommu_flush_all_caches()
1354 amd_iommu_flush_dte_all(iommu); in amd_iommu_flush_all_caches()
1355 amd_iommu_flush_irt_all(iommu); in amd_iommu_flush_all_caches()
1356 amd_iommu_flush_tlb_all(iommu); in amd_iommu_flush_all_caches()
1366 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_iotlb() local
1373 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1378 struct amd_iommu *iommu = data; in device_flush_dte_alias() local
1380 return iommu_flush_dte(iommu, alias); in device_flush_dte_alias()
1388 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_dte() local
1399 device_flush_dte_alias, iommu); in device_flush_dte()
1401 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1405 pci_seg = iommu->pci_seg; in device_flush_dte()
1408 ret = iommu_flush_dte(iommu, alias); in device_flush_dte()
1430 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in domain_flush_pages_v2() local
1436 ret |= iommu_queue_command(iommu, &cmd); in domain_flush_pages_v2()
1456 * Devices of this domain are behind this IOMMU in domain_flush_pages_v1()
1501 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */ in amd_iommu_domain_flush_pages()
1541 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */ in amd_iommu_domain_flush_pages()
1556 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in amd_iommu_dev_flush_pasid_pages() local
1560 iommu_queue_command(iommu, &cmd); in amd_iommu_dev_flush_pasid_pages()
1565 iommu_completion_wait(iommu); in amd_iommu_dev_flush_pasid_pages()
1597 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in amd_iommu_update_and_flush_device_table() local
1599 set_dte_entry(iommu, dev_data); in amd_iommu_update_and_flush_device_table()
1600 clone_aliases(iommu, dev_data->dev); in amd_iommu_update_and_flush_device_table()
1621 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
1625 iommu = get_amd_iommu_from_dev(dev); in amd_iommu_complete_ppr()
1630 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
1636 * allocated for every IOMMU as the default domain. If device isolation
1735 struct amd_iommu *iommu, int pasids) in setup_gcr3_table() argument
1738 int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE; in setup_gcr3_table()
1845 static void set_dte_entry(struct amd_iommu *iommu, in set_dte_entry() argument
1854 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dte_entry()
 1871  * When SNP is enabled, only set TV bit when IOMMU in set_dte_entry()
1936 amd_iommu_flush_tlb_domid(iommu, old_domid); in set_dte_entry()
1940 static void clear_dte_entry(struct amd_iommu *iommu, u16 devid) in clear_dte_entry() argument
1942 struct dev_table_entry *dev_table = get_dev_table(iommu); in clear_dte_entry()
1952 amd_iommu_apply_erratum_63(iommu, devid); in clear_dte_entry()
1958 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in dev_update_dte() local
1961 set_dte_entry(iommu, dev_data); in dev_update_dte()
1963 clear_dte_entry(iommu, dev_data->devid); in dev_update_dte()
1965 clone_aliases(iommu, dev_data->dev); in dev_update_dte()
1967 iommu_completion_wait(iommu); in dev_update_dte()
1977 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in init_gcr3_table() local
1990 * supported by the device/IOMMU. in init_gcr3_table()
1992 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, in init_gcr3_table()
2025 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in do_attach() local
2038 domain->dev_iommu[iommu->index] += 1; in do_attach()
2054 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in do_detach() local
2071 domain->dev_iommu[iommu->index] -= 1; in do_detach()
2114 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in detach_device() local
2147 amd_iommu_iopf_remove_device(iommu, dev_data); in detach_device()
2157 struct amd_iommu *iommu; in amd_iommu_probe_device() local
2164 iommu = rlookup_amd_iommu(dev); in amd_iommu_probe_device()
2165 if (!iommu) in amd_iommu_probe_device()
2169 if (!iommu->iommu.ops) in amd_iommu_probe_device()
2173 return &iommu->iommu; in amd_iommu_probe_device()
2175 ret = iommu_init_device(iommu, dev); in amd_iommu_probe_device()
2179 iommu_ignore_device(iommu, dev); in amd_iommu_probe_device()
2183 amd_iommu_set_pci_msi_domain(dev, iommu); in amd_iommu_probe_device()
2184 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
 2187  * If IOMMU and device support PASID then it will contain max in amd_iommu_probe_device()
2193 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids, in amd_iommu_probe_device()
2198 iommu_completion_wait(iommu); in amd_iommu_probe_device()
2208 struct amd_iommu *iommu; in amd_iommu_release_device() local
2213 iommu = rlookup_amd_iommu(dev); in amd_iommu_release_device()
2214 if (!iommu) in amd_iommu_release_device()
2218 iommu_completion_wait(iommu); in amd_iommu_release_device()
2231 * The following functions belong to the exported interface of AMD IOMMU
2233 * This interface allows access to lower level functions of the IOMMU
2294 * Force IOMMU v1 page table when allocating in protection_domain_alloc()
2337 static bool amd_iommu_hd_support(struct amd_iommu *iommu) in amd_iommu_hd_support() argument
2339 return iommu && (iommu->features & FEATURE_HDSUP); in amd_iommu_hd_support()
2347 struct amd_iommu *iommu = NULL; in do_iommu_domain_alloc() local
2350 iommu = get_amd_iommu_from_dev(dev); in do_iommu_domain_alloc()
2359 if (dirty_tracking && !amd_iommu_hd_support(iommu)) in do_iommu_domain_alloc()
2372 if (iommu) { in do_iommu_domain_alloc()
2374 domain->domain.ops = iommu->iommu.ops->default_domain_ops; in do_iommu_domain_alloc()
2452 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); in amd_iommu_attach_device() local
2466 * Restrict to devices with compatible IOMMU hardware support in amd_iommu_attach_device()
2469 if (dom->dirty_ops && !amd_iommu_hd_support(iommu)) in amd_iommu_attach_device()
2495 if (amd_iommu_iopf_add_device(iommu, dev_data)) in amd_iommu_attach_device()
2549 * AMD's IOMMU can flush as many pages as necessary in a single flush. in amd_iommu_iotlb_gather_add_page()
2554 * hypervisor needs to synchronize the host IOMMU PTEs with those of in amd_iommu_iotlb_gather_add_page()
2608 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); in amd_iommu_capable() local
2610 return amd_iommu_hd_support(iommu); in amd_iommu_capable()
2626 struct amd_iommu *iommu; in amd_iommu_set_dirty_tracking() local
2637 iommu = get_amd_iommu_from_dev_data(dev_data); in amd_iommu_set_dirty_tracking()
2639 dev_table = get_dev_table(iommu); in amd_iommu_set_dirty_tracking()
2688 struct amd_iommu *iommu; in amd_iommu_get_resv_regions() local
2697 iommu = get_amd_iommu_from_dev(dev); in amd_iommu_get_resv_regions()
2698 pci_seg = iommu->pci_seg; in amd_iommu_get_resv_regions()
2882 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt_and_complete() argument
2889 if (iommu->irtcachedis_enabled) in iommu_flush_irt_and_complete()
2893 data = atomic64_add_return(1, &iommu->cmd_sem_val); in iommu_flush_irt_and_complete()
2894 build_completion_wait(&cmd2, iommu, data); in iommu_flush_irt_and_complete()
2896 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_flush_irt_and_complete()
2897 ret = __iommu_queue_command_sync(iommu, &cmd, true); in iommu_flush_irt_and_complete()
2900 ret = __iommu_queue_command_sync(iommu, &cmd2, false); in iommu_flush_irt_and_complete()
2903 wait_on_sem(iommu, data); in iommu_flush_irt_and_complete()
2905 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_flush_irt_and_complete()
2908 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid, in set_dte_irq_entry() argument
2912 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dte_irq_entry()
2924 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid) in get_irq_table() argument
2927 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_irq_table()
2930 "%s: no iommu for devid %x:%x\n", in get_irq_table()
2966 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, in set_remap_table_entry() argument
2969 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in set_remap_table_entry()
2972 set_dte_irq_entry(iommu, devid, table); in set_remap_table_entry()
2973 iommu_flush_dte(iommu, devid); in set_remap_table_entry()
2981 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev); in set_remap_table_entry_alias() local
2983 if (!iommu) in set_remap_table_entry_alias()
2986 pci_seg = iommu->pci_seg; in set_remap_table_entry_alias()
2988 set_dte_irq_entry(iommu, alias, table); in set_remap_table_entry_alias()
2994 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu, in alloc_irq_table() argument
3005 pci_seg = iommu->pci_seg; in alloc_irq_table()
3013 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3031 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3042 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3045 set_remap_table_entry(iommu, alias, table); in alloc_irq_table()
3048 iommu_completion_wait(iommu); in alloc_irq_table()
3060 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count, in alloc_irq_index() argument
3067 table = alloc_irq_table(iommu, devid, pdev); in alloc_irq_index()
3079 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
3089 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
3106 static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, in __modify_irte_ga() argument
3114 table = get_irq_table(iommu, devid); in __modify_irte_ga()
3137 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, in modify_irte_ga() argument
3142 ret = __modify_irte_ga(iommu, devid, index, irte); in modify_irte_ga()
3146 iommu_flush_irt_and_complete(iommu, devid); in modify_irte_ga()
3151 static int modify_irte(struct amd_iommu *iommu, in modify_irte() argument
3157 table = get_irq_table(iommu, devid); in modify_irte()
3165 iommu_flush_irt_and_complete(iommu, devid); in modify_irte()
3170 static void free_irte(struct amd_iommu *iommu, u16 devid, int index) in free_irte() argument
3175 table = get_irq_table(iommu, devid); in free_irte()
3180 iommu->irte_ops->clear_allocated(table, index); in free_irte()
3183 iommu_flush_irt_and_complete(iommu, devid); in free_irte()
3216 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_activate() argument
3221 modify_irte(iommu, devid, index, irte); in irte_activate()
3224 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_activate() argument
3229 modify_irte_ga(iommu, devid, index, irte); in irte_ga_activate()
3232 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_deactivate() argument
3237 modify_irte(iommu, devid, index, irte); in irte_deactivate()
3240 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_deactivate() argument
3245 modify_irte_ga(iommu, devid, index, irte); in irte_ga_deactivate()
3248 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_set_affinity() argument
3255 modify_irte(iommu, devid, index, irte); in irte_set_affinity()
3258 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_ga_set_affinity() argument
3269 modify_irte_ga(iommu, devid, index, irte); in irte_ga_set_affinity()
3357 struct amd_iommu *iommu = data->iommu; in irq_remapping_prepare_irte() local
3359 if (!iommu) in irq_remapping_prepare_irte()
3364 iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED, in irq_remapping_prepare_irte()
3408 struct amd_iommu *iommu; in irq_remapping_alloc() local
3424 iommu = __rlookup_amd_iommu(seg, devid); in irq_remapping_alloc()
3425 if (!iommu) in irq_remapping_alloc()
3435 table = alloc_irq_table(iommu, devid, NULL); in irq_remapping_alloc()
3444 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3455 index = alloc_irq_index(iommu, devid, nr_irqs, align, in irq_remapping_alloc()
3458 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL); in irq_remapping_alloc()
3490 data->iommu = iommu; in irq_remapping_alloc()
3507 free_irte(iommu, devid, index + i); in irq_remapping_alloc()
3526 free_irte(data->iommu, irte_info->devid, irte_info->index); in irq_remapping_free()
3534 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3544 struct amd_iommu *iommu = data->iommu; in irq_remapping_activate() local
3547 if (!iommu) in irq_remapping_activate()
3550 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid, in irq_remapping_activate()
3552 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); in irq_remapping_activate()
3561 struct amd_iommu *iommu = data->iommu; in irq_remapping_deactivate() local
3563 if (iommu) in irq_remapping_deactivate()
3564 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid, in irq_remapping_deactivate()
3571 struct amd_iommu *iommu; in irq_remapping_select() local
3584 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff)); in irq_remapping_select()
3586 return iommu && iommu->ir_domain == d; in irq_remapping_select()
3618 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_activate_guest_mode()
3648 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_deactivate_guest_mode()
3662 if (ir_data->iommu == NULL) in amd_ir_set_vcpu_affinity()
3665 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3710 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, in amd_ir_update_irte() argument
3720 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid, in amd_ir_update_irte()
3732 struct amd_iommu *iommu = ir_data->iommu; in amd_ir_set_affinity() local
3735 if (!iommu) in amd_ir_set_affinity()
3742 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); in amd_ir_set_affinity()
3774 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
3778 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
3781 iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0, in amd_iommu_create_irq_domain()
3782 fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
3783 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
3788 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI); in amd_iommu_create_irq_domain()
3789 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT | in amd_iommu_create_irq_domain()
3791 iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops; in amd_iommu_create_irq_domain()
3805 if (!ir_data->iommu) in amd_iommu_update_ga()
3816 return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_update_ga()