Lines matching "ext-irq-range"
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
19 #include <linux/irq.h>
20 #include <linux/amd-iommu.h>
25 #include <asm/pci-direct.h>
39 #include "../iommu-pages.h"
127 u32 ext;
186 /* IOMMUs have a non-present cache? */
240 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
245 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
252 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in init_translation_status()
254 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
279 u64 tmp = iommu->features; in get_global_efr()
280 u64 tmp2 = iommu->features2; in get_global_efr()
282 if (list_is_first(&iommu->list, &amd_iommu_list)) { in get_global_efr()
295 iommu->index, iommu->pci_seg->id, in get_global_efr()
296 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid), in get_global_efr()
297 PCI_FUNC(iommu->devid)); in get_global_efr()
315 iommu->features = h->efr_reg; in early_iommu_features_init()
316 iommu->features2 = h->efr_reg2; in early_iommu_features_init()
328 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_read_l1()
329 pci_read_config_dword(iommu->dev, 0xfc, &val); in iommu_read_l1()
335 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); in iommu_write_l1()
336 pci_write_config_dword(iommu->dev, 0xfc, val); in iommu_write_l1()
337 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_write_l1()
344 pci_write_config_dword(iommu->dev, 0xf0, address); in iommu_read_l2()
345 pci_read_config_dword(iommu->dev, 0xf4, &val); in iommu_read_l2()
351 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); in iommu_write_l2()
352 pci_write_config_dword(iommu->dev, 0xf4, val); in iommu_write_l2()
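
The four helpers above use the IOMMU's indirect L1/L2 register interface: an index is written to one PCI config register and the data moves through the adjacent one, with an extra bit in the index word selecting a write cycle. A condensed sketch of the L2 variant of this pattern, assuming the 0xf0/0xf4 offsets shown above (not a drop-in copy of the driver's helpers):

    #include <linux/pci.h>

    /* Sketch: indirect L2 register access through PCI config space. */
    static u32 l2_read(struct pci_dev *pdev, u16 address)
    {
            u32 val;

            pci_write_config_dword(pdev, 0xf0, address);   /* select the register */
            pci_read_config_dword(pdev, 0xf4, &val);       /* data port returns it */
            return val;
    }

    static void l2_write(struct pci_dev *pdev, u16 address, u32 val)
    {
            pci_write_config_dword(pdev, 0xf0, address | (1 << 8)); /* bit 8 requests a write */
            pci_write_config_dword(pdev, 0xf4, val);                /* data to be stored */
    }
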
365  * This function sets the exclusion range in the IOMMU. DMA accesses to the
366 * exclusion range are passed through untranslated
370 u64 start = iommu->exclusion_start & PAGE_MASK; in iommu_set_exclusion_range()
371 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; in iommu_set_exclusion_range()
374 if (!iommu->exclusion_start) in iommu_set_exclusion_range()
378 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_exclusion_range()
382 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_exclusion_range()
388 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem); in iommu_set_cwwb_range()
395 * Re-purpose Exclusion base/limit registers for Completion wait in iommu_set_cwwb_range()
396 * write-back base/limit. in iommu_set_cwwb_range()
398 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_cwwb_range()
405 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_cwwb_range()
413 u32 dev_table_size = iommu->pci_seg->dev_table_size; in iommu_set_device_table()
416 BUG_ON(iommu->mmio_base == NULL); in iommu_set_device_table()
419 entry |= (dev_table_size >> 12) - 1; in iommu_set_device_table()
420 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, in iommu_set_device_table()
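
The size field ORed into the device-table base register above encodes the table length as a count of 4 KiB pages minus one. A worked example with an illustrative table size (not taken from this listing):

    /* Illustrative only: a 2 MiB device table covers 512 pages of 4 KiB, */
    /* so the encoded size field is (0x200000 >> 12) - 1 = 0x1ff (511).   */
    u32 dev_table_size  = 2 * 1024 * 1024;
    u64 entry_size_bits = (dev_table_size >> 12) - 1;   /* == 0x1ff */
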
429 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_enable()
431 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_enable()
438 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_disable()
440 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_disable()
447 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_set_inv_tlb_timeout()
450 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_set_inv_tlb_timeout()
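
The control-register updates above all follow the same read-modify-write sequence on the MMIO control register: read the 64-bit value, set or clear the relevant bits, write it back. A condensed sketch of that pattern, assuming struct amd_iommu and MMIO_CONTROL_OFFSET from this file:

    #include <linux/io.h>      /* readq/writeq */
    #include <linux/bits.h>    /* BIT_ULL      */

    /* Sketch: read-modify-write of one control bit. */
    static void ctrl_update_bit(struct amd_iommu *iommu, u8 bit, bool set)
    {
            u64 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);

            if (set)
                    ctrl |= BIT_ULL(bit);
            else
                    ctrl &= ~BIT_ULL(bit);

            writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
    }
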
461 if (!iommu->mmio_base) in iommu_disable()
493 pr_err("Can not reserve memory region %llx-%llx for mmio\n", in iommu_map_mmio_space()
504 if (iommu->mmio_base) in iommu_unmap_mmio_space()
505 iounmap(iommu->mmio_base); in iommu_unmap_mmio_space()
506 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); in iommu_unmap_mmio_space()
513 switch (h->type) { in get_ivhd_header_size()
539 u32 type = ((struct ivhd_entry *)ivhd)->type; in ivhd_entry_length()
558 int last_devid = -EINVAL; in find_last_devid_from_ivhd()
563 pr_err("Unsupported IVHD type %#x\n", h->type); in find_last_devid_from_ivhd()
564 return -EINVAL; in find_last_devid_from_ivhd()
568 end += h->length; in find_last_devid_from_ivhd()
572 switch (dev->type) { in find_last_devid_from_ivhd()
581 if (dev->devid > last_devid) in find_last_devid_from_ivhd()
582 last_devid = dev->devid; in find_last_devid_from_ivhd()
600 for (i = 0; i < table->length; ++i) in check_ivrs_checksum()
605 return -ENODEV; in check_ivrs_checksum()
624 end += table->length; in find_last_devid_acpi()
627 if (h->pci_seg == pci_seg && in find_last_devid_acpi()
628 h->type == amd_iommu_target_ivhd_type) { in find_last_devid_acpi()
632 return -EINVAL; in find_last_devid_acpi()
636 p += h->length; in find_last_devid_acpi()
655 pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32, in alloc_dev_table()
656 get_order(pci_seg->dev_table_size)); in alloc_dev_table()
657 if (!pci_seg->dev_table) in alloc_dev_table()
658 return -ENOMEM; in alloc_dev_table()
665 iommu_free_pages(pci_seg->dev_table, in free_dev_table()
666 get_order(pci_seg->dev_table_size)); in free_dev_table()
667 pci_seg->dev_table = NULL; in free_dev_table()
673 pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL, in alloc_rlookup_table()
674 get_order(pci_seg->rlookup_table_size)); in alloc_rlookup_table()
675 if (pci_seg->rlookup_table == NULL) in alloc_rlookup_table()
676 return -ENOMEM; in alloc_rlookup_table()
683 iommu_free_pages(pci_seg->rlookup_table, in free_rlookup_table()
684 get_order(pci_seg->rlookup_table_size)); in free_rlookup_table()
685 pci_seg->rlookup_table = NULL; in free_rlookup_table()
690 pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL, in alloc_irq_lookup_table()
691 get_order(pci_seg->rlookup_table_size)); in alloc_irq_lookup_table()
692 kmemleak_alloc(pci_seg->irq_lookup_table, in alloc_irq_lookup_table()
693 pci_seg->rlookup_table_size, 1, GFP_KERNEL); in alloc_irq_lookup_table()
694 if (pci_seg->irq_lookup_table == NULL) in alloc_irq_lookup_table()
695 return -ENOMEM; in alloc_irq_lookup_table()
702 kmemleak_free(pci_seg->irq_lookup_table); in free_irq_lookup_table()
703 iommu_free_pages(pci_seg->irq_lookup_table, in free_irq_lookup_table()
704 get_order(pci_seg->rlookup_table_size)); in free_irq_lookup_table()
705 pci_seg->irq_lookup_table = NULL; in free_irq_lookup_table()
712 pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL, in alloc_alias_table()
713 get_order(pci_seg->alias_table_size)); in alloc_alias_table()
714 if (!pci_seg->alias_table) in alloc_alias_table()
715 return -ENOMEM; in alloc_alias_table()
720 for (i = 0; i <= pci_seg->last_bdf; ++i) in alloc_alias_table()
721 pci_seg->alias_table[i] = i; in alloc_alias_table()
728 iommu_free_pages(pci_seg->alias_table, in free_alias_table()
729 get_order(pci_seg->alias_table_size)); in free_alias_table()
730 pci_seg->alias_table = NULL; in free_alias_table()
740 iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL, in alloc_command_buffer()
743 return iommu->cmd_buf ? 0 : -ENOMEM; in alloc_command_buffer()
756 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_restart_log()
765 writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_restart_log()
801 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); in amd_iommu_reset_cmd_buffer()
802 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in amd_iommu_reset_cmd_buffer()
803 iommu->cmd_buf_head = 0; in amd_iommu_reset_cmd_buffer()
804 iommu->cmd_buf_tail = 0; in amd_iommu_reset_cmd_buffer()
817 BUG_ON(iommu->cmd_buf == NULL); in iommu_enable_command_buffer()
819 entry = iommu_virt_to_phys(iommu->cmd_buf); in iommu_enable_command_buffer()
822 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, in iommu_enable_command_buffer()
838 iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); in free_command_buffer()
860 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL, in alloc_event_buffer()
863 return iommu->evt_buf ? 0 : -ENOMEM; in alloc_event_buffer()
870 BUG_ON(iommu->evt_buf == NULL); in iommu_enable_event_buffer()
872 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; in iommu_enable_event_buffer()
874 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, in iommu_enable_event_buffer()
878 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_enable_event_buffer()
879 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_enable_event_buffer()
894 iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); in free_event_buffer()
900 iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE)); in free_ga_log()
901 iommu_free_pages(iommu->ga_log_tail, get_order(8)); in free_ga_log()
911 if (!iommu->ga_log) in iommu_ga_log_enable()
912 return -EINVAL; in iommu_ga_log_enable()
914 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; in iommu_ga_log_enable()
915 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, in iommu_ga_log_enable()
917 entry = (iommu_virt_to_phys(iommu->ga_log_tail) & in iommu_ga_log_enable()
918 (BIT_ULL(52)-1)) & ~7ULL; in iommu_ga_log_enable()
919 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, in iommu_ga_log_enable()
921 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_ga_log_enable()
922 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_ga_log_enable()
929 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in iommu_ga_log_enable()
936 return -EINVAL; in iommu_ga_log_enable()
946 iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE)); in iommu_init_ga_log()
947 if (!iommu->ga_log) in iommu_init_ga_log()
950 iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8)); in iommu_init_ga_log()
951 if (!iommu->ga_log_tail) in iommu_init_ga_log()
957 return -EINVAL; in iommu_init_ga_log()
963 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1); in alloc_cwwb_sem()
965 return iommu->cmd_sem ? 0 : -ENOMEM; in alloc_cwwb_sem()
970 if (iommu->cmd_sem) in free_cwwb_sem()
971 iommu_free_page((void *)iommu->cmd_sem); in free_cwwb_sem()
978 * XT mode (32-bit APIC destination ID) requires in iommu_enable_xt()
979 * GA mode (128-bit IRTE support) as a prerequisite. in iommu_enable_xt()
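
The comment above records the dependency between XT mode (32-bit x2APIC destination IDs) and GA mode (128-bit IRTEs). A sketch of how such a gate can be expressed with the iommu_feature_enable() helper seen earlier; the CONTROL_XT_EN name and the guest-IR mode test are assumptions, not quoted from this listing:

    /* Sketch: only enable XT mode when 128-bit guest-APIC IRTEs are in use. */
    if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
        amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
            iommu_feature_enable(iommu, CONTROL_XT_EN);
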
1031 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in __copy_device_table()
1039 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); in __copy_device_table()
1040 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); in __copy_device_table()
1044 if (old_devtb_size != pci_seg->dev_table_size) { in __copy_device_table()
1046 iommu->index); in __copy_device_table()
1063 pci_seg->dev_table_size) in __copy_device_table()
1064 : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB); in __copy_device_table()
1069 pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32, in __copy_device_table()
1070 get_order(pci_seg->dev_table_size)); in __copy_device_table()
1071 if (pci_seg->old_dev_tbl_cpy == NULL) { in __copy_device_table()
1077 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in __copy_device_table()
1078 pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid]; in __copy_device_table()
1083 pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; in __copy_device_table()
1084 pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; in __copy_device_table()
1090 pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp; in __copy_device_table()
1093 pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp; in __copy_device_table()
1103 pr_err("Wrong old irq remapping flag: %#x\n", devid); in __copy_device_table()
1108 pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; in __copy_device_table()
1124 pr_warn("Translation is already enabled - trying to copy translation structures\n"); in copy_device_table()
1132 if (pci_seg->id != iommu->pci_seg->id) in copy_device_table()
1191 return -EINVAL; in add_special_device()
1194 if (!(entry->id == id && entry->cmd_line)) in add_special_device()
1197 pr_info("Command-line override present for %s id %d - ignoring\n", in add_special_device()
1200 *devid = entry->devid; in add_special_device()
1207 return -ENOMEM; in add_special_device()
1209 entry->id = id; in add_special_device()
1210 entry->devid = *devid; in add_special_device()
1211 entry->cmd_line = cmd_line; in add_special_device()
1213 list_add_tail(&entry->list, list); in add_special_device()
1225 if (strcmp(entry->hid, hid) || in add_acpi_hid_device()
1226 (*uid && *entry->uid && strcmp(entry->uid, uid)) || in add_acpi_hid_device()
1227 !entry->cmd_line) in add_acpi_hid_device()
1230 pr_info("Command-line override for hid:%s uid:%s\n", in add_acpi_hid_device()
1232 *devid = entry->devid; in add_acpi_hid_device()
1238 return -ENOMEM; in add_acpi_hid_device()
1240 memcpy(entry->uid, uid, strlen(uid)); in add_acpi_hid_device()
1241 memcpy(entry->hid, hid, strlen(hid)); in add_acpi_hid_device()
1242 entry->devid = *devid; in add_acpi_hid_device()
1243 entry->cmd_line = cmd_line; in add_acpi_hid_device()
1244 entry->root_devid = (entry->devid & (~0x7)); in add_acpi_hid_device()
1247 entry->cmd_line ? "cmd" : "ivrs", in add_acpi_hid_device()
1248 entry->hid, entry->uid, entry->root_devid); in add_acpi_hid_device()
1250 list_add_tail(&entry->list, list); in add_acpi_hid_device()
1301 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in init_iommu_from_acpi()
1315 iommu->acpi_flags = h->flags; in init_iommu_from_acpi()
1322 pr_err("Unsupported IVHD type %#x\n", h->type); in init_iommu_from_acpi()
1323 return -EINVAL; in init_iommu_from_acpi()
1328 end += h->length; in init_iommu_from_acpi()
1333 seg_id = pci_seg->id; in init_iommu_from_acpi()
1335 switch (e->type) { in init_iommu_from_acpi()
1338 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags); in init_iommu_from_acpi()
1340 for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i) in init_iommu_from_acpi()
1341 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0); in init_iommu_from_acpi()
1347 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1348 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1349 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1350 e->flags); in init_iommu_from_acpi()
1352 devid = e->devid; in init_iommu_from_acpi()
1353 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1359 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1360 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1361 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1362 e->flags); in init_iommu_from_acpi()
1364 devid_start = e->devid; in init_iommu_from_acpi()
1365 flags = e->flags; in init_iommu_from_acpi()
1373 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1374 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1375 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1376 e->flags, in init_iommu_from_acpi()
1377 PCI_BUS_NUM(e->ext >> 8), in init_iommu_from_acpi()
1378 PCI_SLOT(e->ext >> 8), in init_iommu_from_acpi()
1379 PCI_FUNC(e->ext >> 8)); in init_iommu_from_acpi()
1381 devid = e->devid; in init_iommu_from_acpi()
1382 devid_to = e->ext >> 8; in init_iommu_from_acpi()
1383 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); in init_iommu_from_acpi()
1384 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); in init_iommu_from_acpi()
1385 pci_seg->alias_table[devid] = devid_to; in init_iommu_from_acpi()
1392 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1393 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1394 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1395 e->flags, in init_iommu_from_acpi()
1396 seg_id, PCI_BUS_NUM(e->ext >> 8), in init_iommu_from_acpi()
1397 PCI_SLOT(e->ext >> 8), in init_iommu_from_acpi()
1398 PCI_FUNC(e->ext >> 8)); in init_iommu_from_acpi()
1400 devid_start = e->devid; in init_iommu_from_acpi()
1401 flags = e->flags; in init_iommu_from_acpi()
1402 devid_to = e->ext >> 8; in init_iommu_from_acpi()
1409 "flags: %02x ext: %08x\n", in init_iommu_from_acpi()
1410 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1411 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1412 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1413 e->flags, e->ext); in init_iommu_from_acpi()
1415 devid = e->devid; in init_iommu_from_acpi()
1416 set_dev_entry_from_acpi(iommu, devid, e->flags, in init_iommu_from_acpi()
1417 e->ext); in init_iommu_from_acpi()
1422 "%04x:%02x:%02x.%x flags: %02x ext: %08x\n", in init_iommu_from_acpi()
1423 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1424 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1425 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1426 e->flags, e->ext); in init_iommu_from_acpi()
1428 devid_start = e->devid; in init_iommu_from_acpi()
1429 flags = e->flags; in init_iommu_from_acpi()
1430 ext_flags = e->ext; in init_iommu_from_acpi()
1436 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1437 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1438 PCI_FUNC(e->devid)); in init_iommu_from_acpi()
1440 devid = e->devid; in init_iommu_from_acpi()
1443 pci_seg->alias_table[dev_i] = devid_to; in init_iommu_from_acpi()
1457 handle = e->ext & 0xff; in init_iommu_from_acpi()
1458 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8)); in init_iommu_from_acpi()
1459 type = (e->ext >> 24) & 0xff; in init_iommu_from_acpi()
1480 * command-line override is present. So call in init_iommu_from_acpi()
1483 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1493 if (h->type != 0x40) { in init_iommu_from_acpi()
1495 e->type); in init_iommu_from_acpi()
1499 BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1); in init_iommu_from_acpi()
1500 memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1); in init_iommu_from_acpi()
1501 hid[ACPIHID_HID_LEN - 1] = '\0'; in init_iommu_from_acpi()
1509 switch (e->uidf) { in init_iommu_from_acpi()
1512 if (e->uidl != 0) in init_iommu_from_acpi()
1518 sprintf(uid, "%d", e->uid); in init_iommu_from_acpi()
1523 memcpy(uid, &e->uid, e->uidl); in init_iommu_from_acpi()
1524 uid[e->uidl] = '\0'; in init_iommu_from_acpi()
1531 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid); in init_iommu_from_acpi()
1538 flags = e->flags; in init_iommu_from_acpi()
1546 * command-line override is present. So call in init_iommu_from_acpi()
1549 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1583 pci_seg->last_bdf = last_bdf; in alloc_pci_segment()
1585 pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf); in alloc_pci_segment()
1586 pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf); in alloc_pci_segment()
1587 pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf); in alloc_pci_segment()
1589 pci_seg->id = id; in alloc_pci_segment()
1590 init_llist_head(&pci_seg->dev_data_list); in alloc_pci_segment()
1591 INIT_LIST_HEAD(&pci_seg->unity_map); in alloc_pci_segment()
1592 list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list); in alloc_pci_segment()
1610 if (pci_seg->id == id) in get_pci_segment()
1622 list_del(&pci_seg->list); in free_pci_segments()
1633 if (iommu->iommu.dev) { in free_sysfs()
1634 iommu_device_unregister(&iommu->iommu); in free_sysfs()
1635 iommu_device_sysfs_remove(&iommu->iommu); in free_sysfs()
1656 list_del(&iommu->list); in free_iommu_all()
1663 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1677 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1678 pci_read_config_dword(iommu->dev, 0xf4, &value); in amd_iommu_erratum_746_workaround()
1684 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); in amd_iommu_erratum_746_workaround()
1686 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); in amd_iommu_erratum_746_workaround()
1687 pci_info(iommu->dev, "Applying erratum 746 workaround\n"); in amd_iommu_erratum_746_workaround()
1690 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1694 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1717 pci_info(iommu->dev, "Applying ATS write check workaround\n"); in amd_iommu_ats_write_check_workaround()
1730 pci_seg = get_pci_segment(h->pci_seg, ivrs_base); in init_iommu_one()
1732 return -ENOMEM; in init_iommu_one()
1733 iommu->pci_seg = pci_seg; in init_iommu_one()
1735 raw_spin_lock_init(&iommu->lock); in init_iommu_one()
1736 atomic64_set(&iommu->cmd_sem_val, 0); in init_iommu_one()
1739 list_add_tail(&iommu->list, &amd_iommu_list); in init_iommu_one()
1740 iommu->index = amd_iommus_present++; in init_iommu_one()
1742 if (unlikely(iommu->index >= MAX_IOMMUS)) { in init_iommu_one()
1744 return -ENOSYS; in init_iommu_one()
1747 /* Index is fine - add IOMMU to the array */ in init_iommu_one()
1748 amd_iommus[iommu->index] = iommu; in init_iommu_one()
1753 iommu->devid = h->devid; in init_iommu_one()
1754 iommu->cap_ptr = h->cap_ptr; in init_iommu_one()
1755 iommu->mmio_phys = h->mmio_phys; in init_iommu_one()
1757 switch (h->type) { in init_iommu_one()
1760 if ((h->efr_attr != 0) && in init_iommu_one()
1761 ((h->efr_attr & (0xF << 13)) != 0) && in init_iommu_one()
1762 ((h->efr_attr & (0x3F << 17)) != 0)) in init_iommu_one()
1763 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1765 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1768  * Note: GA (128-bit IRTE) mode requires cmpxchg16b support. in init_iommu_one()
1773 ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) in init_iommu_one()
1778 if (h->efr_reg & (1 << 9)) in init_iommu_one()
1779 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1781 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1784  * Note: GA (128-bit IRTE) mode requires cmpxchg16b support. in init_iommu_one()
1789 ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) { in init_iommu_one()
1794 if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) in init_iommu_one()
1801 return -EINVAL; in init_iommu_one()
1804 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, in init_iommu_one()
1805 iommu->mmio_phys_end); in init_iommu_one()
1806 if (!iommu->mmio_base) in init_iommu_one()
1807 return -ENOMEM; in init_iommu_one()
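
The "requires cmpxchg16b support" notes above refer to the fact that a 128-bit IRTE must be updated atomically, which needs the CPU's cmpxchg16b instruction. A sketch of the kind of capability gate this implies; the X86_FEATURE_CX16 check and the legacy fallback value are assumptions, not quoted from this listing:

    /* Sketch: without cmpxchg16b, fall back to legacy (32-bit IRTE) remapping. */
    if (!boot_cpu_has(X86_FEATURE_CX16) ||
        (h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)
            amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
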
1817 return -ENOMEM; in init_iommu_one_late()
1820 return -ENOMEM; in init_iommu_one_late()
1823 return -ENOMEM; in init_iommu_one_late()
1825 iommu->int_enabled = false; in init_iommu_one_late()
1832 iommu->index); in init_iommu_one_late()
1847 iommu->pci_seg->rlookup_table[iommu->devid] = NULL; in init_iommu_one_late()
1853 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1863 u8 last_type = ivhd->type; in get_highest_supported_ivhd_type()
1864 u16 devid = ivhd->devid; in get_highest_supported_ivhd_type()
1866 while (((u8 *)ivhd - base < ivrs->length) && in get_highest_supported_ivhd_type()
1867 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) { in get_highest_supported_ivhd_type()
1870 if (ivhd->devid == devid) in get_highest_supported_ivhd_type()
1871 last_type = ivhd->type; in get_highest_supported_ivhd_type()
1872 ivhd = (struct ivhd_header *)(p + ivhd->length); in get_highest_supported_ivhd_type()
1889 end += table->length; in init_iommu_all()
1899 h->pci_seg, PCI_BUS_NUM(h->devid), in init_iommu_all()
1900 PCI_SLOT(h->devid), PCI_FUNC(h->devid), in init_iommu_all()
1901 h->cap_ptr, h->flags, h->info); in init_iommu_all()
1902 DUMP_printk(" mmio-addr: %016llx\n", in init_iommu_all()
1903 h->mmio_phys); in init_iommu_all()
1907 return -ENOMEM; in init_iommu_all()
1913 p += h->length; in init_iommu_all()
1934 struct pci_dev *pdev = iommu->dev; in init_iommu_perf_ctr()
1943 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); in init_iommu_perf_ctr()
1944 iommu->max_banks = (u8) ((val >> 12) & 0x3f); in init_iommu_perf_ctr()
1945 iommu->max_counters = (u8) ((val >> 7) & 0xf); in init_iommu_perf_ctr()
1955 return sysfs_emit(buf, "%x\n", iommu->cap); in amd_iommu_show_cap()
1974 .name = "amd-iommu",
1992 if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) in late_iommu_features_init()
1996 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); in late_iommu_features_init()
1997 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2); in late_iommu_features_init()
2020 int cap_ptr = iommu->cap_ptr; in iommu_init_pci()
2023 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, in iommu_init_pci()
2024 PCI_BUS_NUM(iommu->devid), in iommu_init_pci()
2025 iommu->devid & 0xff); in iommu_init_pci()
2026 if (!iommu->dev) in iommu_init_pci()
2027 return -ENODEV; in iommu_init_pci()
2030 iommu->dev->match_driver = false; in iommu_init_pci()
2032 /* ACPI _PRT won't have an IRQ for IOMMU */ in iommu_init_pci()
2033 iommu->dev->irq_managed = 1; in iommu_init_pci()
2035 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, in iommu_init_pci()
2036 &iommu->cap); in iommu_init_pci()
2038 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) in iommu_init_pci()
2048 iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1; in iommu_init_pci()
2050 BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK); in iommu_init_pci()
2054 if (amd_iommu_max_glx_val == -1) in iommu_init_pci()
2063 return -ENOMEM; in iommu_init_pci()
2065 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { in iommu_init_pci()
2076 pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n"); in iommu_init_pci()
2081 if (is_rd890_iommu(iommu->dev)) { in iommu_init_pci()
2084 iommu->root_pdev = in iommu_init_pci()
2085 pci_get_domain_bus_and_slot(iommu->pci_seg->id, in iommu_init_pci()
2086 iommu->dev->bus->number, in iommu_init_pci()
2094 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_init_pci()
2095 &iommu->stored_addr_lo); in iommu_init_pci()
2096 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_init_pci()
2097 &iommu->stored_addr_hi); in iommu_init_pci()
2100 iommu->stored_addr_lo &= ~1; in iommu_init_pci()
2104 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); in iommu_init_pci()
2107 iommu->stored_l2[i] = iommu_read_l2(iommu, i); in iommu_init_pci()
2113 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, in iommu_init_pci()
2114 amd_iommu_groups, "ivhd%d", iommu->index); in iommu_init_pci()
2128 iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); in iommu_init_pci()
2130 return pci_enable_device(iommu->dev); in iommu_init_pci()
2179 iommu->index, ret); in amd_iommu_init_pci()
2182  /* Need to set up the range after PCI init */ in amd_iommu_init_pci()
2221 r = pci_enable_msi(iommu->dev); in iommu_setup_msi()
2225 r = request_threaded_irq(iommu->dev->irq, in iommu_setup_msi()
2228 0, "AMD-Vi", in iommu_setup_msi()
2232 pci_disable_msi(iommu->dev); in iommu_setup_msi()
2273 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI) in intcapxt_irqdomain_alloc()
2274 return -EINVAL; in intcapxt_irqdomain_alloc()
2283 irqd->chip = &intcapxt_controller; in intcapxt_irqdomain_alloc()
2284 irqd->hwirq = info->hwirq; in intcapxt_irqdomain_alloc()
2285 irqd->chip_data = info->data; in intcapxt_irqdomain_alloc()
2301 struct amd_iommu *iommu = irqd->chip_data; in intcapxt_unmask_irq()
2306 xt.dest_mode_logical = apic->dest_mode_logical; in intcapxt_unmask_irq()
2307 xt.vector = cfg->vector; in intcapxt_unmask_irq()
2308 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); in intcapxt_unmask_irq()
2309 xt.destid_24_31 = cfg->dest_apicid >> 24; in intcapxt_unmask_irq()
2311 writeq(xt.capxt, iommu->mmio_base + irqd->hwirq); in intcapxt_unmask_irq()
2316 struct amd_iommu *iommu = irqd->chip_data; in intcapxt_mask_irq()
2318 writeq(0, iommu->mmio_base + irqd->hwirq); in intcapxt_mask_irq()
2325 struct irq_data *parent = irqd->parent_data; in intcapxt_set_affinity()
2328 ret = parent->chip->irq_set_affinity(parent, mask, force); in intcapxt_set_affinity()
2336 return on ? -EOPNOTSUPP : 0; in intcapxt_set_wake()
2340 .name = "IOMMU-MSI",
2364 /* No need for locking here (yet) as the init is single-threaded */ in iommu_get_irqdomain()
2368 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI"); in iommu_get_irqdomain()
2386 int irq, ret; in __iommu_setup_intcapxt() local
2387 int node = dev_to_node(&iommu->dev->dev); in __iommu_setup_intcapxt()
2391 return -ENXIO; in __iommu_setup_intcapxt()
2398 irq = irq_domain_alloc_irqs(domain, 1, node, &info); in __iommu_setup_intcapxt()
2399 if (irq < 0) { in __iommu_setup_intcapxt()
2401 return irq; in __iommu_setup_intcapxt()
2404 ret = request_threaded_irq(irq, amd_iommu_int_handler, in __iommu_setup_intcapxt()
2407 irq_domain_free_irqs(irq, 1); in __iommu_setup_intcapxt()
2419 snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name), in iommu_setup_intcapxt()
2420 "AMD-Vi%d-Evt", iommu->index); in iommu_setup_intcapxt()
2421 ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name, in iommu_setup_intcapxt()
2427 snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name), in iommu_setup_intcapxt()
2428 "AMD-Vi%d-PPR", iommu->index); in iommu_setup_intcapxt()
2429 ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name, in iommu_setup_intcapxt()
2436 snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name), in iommu_setup_intcapxt()
2437 "AMD-Vi%d-GA", iommu->index); in iommu_setup_intcapxt()
2438 ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name, in iommu_setup_intcapxt()
2450 if (iommu->int_enabled) in iommu_init_irq()
2455 else if (iommu->dev->msi_cap) in iommu_init_irq()
2458 ret = -ENODEV; in iommu_init_irq()
2463 iommu->int_enabled = true; in iommu_init_irq()
2488 list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) { in free_unity_maps()
2489 list_del(&entry->list); in free_unity_maps()
2503 pci_seg = get_pci_segment(m->pci_seg, ivrs_base); in init_unity_map_range()
2505 return -ENOMEM; in init_unity_map_range()
2509 return -ENOMEM; in init_unity_map_range()
2511 switch (m->type) { in init_unity_map_range()
2517 e->devid_start = e->devid_end = m->devid; in init_unity_map_range()
2521 e->devid_start = 0; in init_unity_map_range()
2522 e->devid_end = pci_seg->last_bdf; in init_unity_map_range()
2526 e->devid_start = m->devid; in init_unity_map_range()
2527 e->devid_end = m->aux; in init_unity_map_range()
2530 e->address_start = PAGE_ALIGN(m->range_start); in init_unity_map_range()
2531 e->address_end = e->address_start + PAGE_ALIGN(m->range_length); in init_unity_map_range()
2532 e->prot = m->flags >> 1; in init_unity_map_range()
2535 * Treat per-device exclusion ranges as r/w unity-mapped regions in init_unity_map_range()
2537 * range (exclusion_start and exclusion_length members). This in init_unity_map_range()
2541 if (m->flags & IVMD_FLAG_EXCL_RANGE) in init_unity_map_range()
2542 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; in init_unity_map_range()
2546 " flags: %x\n", s, m->pci_seg, in init_unity_map_range()
2547 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), in init_unity_map_range()
2548 PCI_FUNC(e->devid_start), m->pci_seg, in init_unity_map_range()
2549 PCI_BUS_NUM(e->devid_end), in init_unity_map_range()
2550 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), in init_unity_map_range()
2551 e->address_start, e->address_end, m->flags); in init_unity_map_range()
2553 list_add_tail(&e->list, &pci_seg->unity_map); in init_unity_map_range()
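
In the prot computation above, the IVMD flags word carries the read/write permission bits one position above where the unity-map prot field expects them, so shifting right by one converts flags to prot; exclusion ranges are forced to read+write instead. A worked example, with the flag bit values taken as assumptions:

    /* Assumed layout: IVMD_FLAG_UNITY_MAP=0x01, IR=0x02, IW=0x04, EXCL_RANGE=0x08. */
    u8 flags = 0x07;               /* unity map, readable, writable */
    u8 prot  = flags >> 1;         /* 0x03: IR (bit 0) + IW (bit 1) */
    /* For an exclusion range: prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1 = 0x03 too. */
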
2564 end += table->length; in init_memory_definitions()
2569 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) in init_memory_definitions()
2572 p += m->length; in init_memory_definitions()
2584 struct dev_table_entry *dev_table = pci_seg->dev_table; in init_device_table_dma()
2589 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in init_device_table_dma()
2599 struct dev_table_entry *dev_table = pci_seg->dev_table; in uninit_device_table_dma()
2604 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in uninit_device_table_dma()
2619 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) in init_device_table()
2620 __set_dev_entry_bit(pci_seg->dev_table, in init_device_table()
2627 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? in iommu_init_flags()
2631 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? in iommu_init_flags()
2635 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? in iommu_init_flags()
2639 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? in iommu_init_flags()
2656 struct pci_dev *pdev = iommu->root_pdev; in iommu_apply_resume_quirks()
2659 if (!is_rd890_iommu(iommu->dev) || !pdev) in iommu_apply_resume_quirks()
2676 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
2677 iommu->stored_addr_lo); in iommu_apply_resume_quirks()
2678 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_apply_resume_quirks()
2679 iommu->stored_addr_hi); in iommu_apply_resume_quirks()
2684 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); in iommu_apply_resume_quirks()
2688 iommu_write_l2(iommu, i, iommu->stored_l2[i]); in iommu_apply_resume_quirks()
2691 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
2692 iommu->stored_addr_lo | 1); in iommu_apply_resume_quirks()
2702 iommu->irte_ops = &irte_128_ops; in iommu_enable_ga()
2705 iommu->irte_ops = &irte_32_ops; in iommu_enable_ga()
2729 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_enable_irtcachedis()
2732 iommu->irtcachedis_enabled = true; in iommu_enable_irtcachedis()
2734 iommu->index, iommu->devid, in iommu_enable_irtcachedis()
2735 iommu->irtcachedis_enabled ? "disabled" : "enabled"); in iommu_enable_irtcachedis()
2758  * Or, if in a kdump kernel and IOMMUs are all pre-enabled, try to copy
2777 if (pci_seg->old_dev_tbl_cpy != NULL) { in early_enable_iommus()
2778 iommu_free_pages(pci_seg->old_dev_tbl_cpy, in early_enable_iommus()
2779 get_order(pci_seg->dev_table_size)); in early_enable_iommus()
2780 pci_seg->old_dev_tbl_cpy = NULL; in early_enable_iommus()
2792 iommu_free_pages(pci_seg->dev_table, in early_enable_iommus()
2793 get_order(pci_seg->dev_table_size)); in early_enable_iommus()
2794 pci_seg->dev_table = pci_seg->old_dev_tbl_cpy; in early_enable_iommus()
2834 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in enable_iommus_vapic()
2846 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in enable_iommus_vapic()
2915 /* re-load the hardware */ in amd_iommu_resume()
2958 * anymore - so be careful in check_ioapic_information()
3043 return -ENODEV; in early_amd_iommu_init()
3047 return -ENODEV; in early_amd_iommu_init()
3051 return -EINVAL; in early_amd_iommu_init()
3067 /* Device table - directly used by all IOMMUs */ in early_amd_iommu_init()
3068 ret = -ENOMEM; in early_amd_iommu_init()
3076  * never allocate domain 0 because it's used as the non-allocated and in early_amd_iommu_init()
3107 ret = -ENOMEM; in early_amd_iommu_init()
3246 ret = -ENODEV; in state_next()
3254 ret = -EINVAL; in state_next()
3285 ret = -EINVAL; in state_next()
3313 int ret = -EINVAL; in iommu_go_to_state()
3339 return amd_iommu_irq_remap ? 0 : -ENODEV; in amd_iommu_prepare()
3387 * We failed to initialize the AMD IOMMU - try fallback in amd_iommu_init()
3431 return -ENODEV; in amd_iommu_detect()
3434 return -ENODEV; in amd_iommu_detect()
3479 return -EINVAL; in parse_amd_iommu_options()
3498 pr_info("Restricting V1 page-sizes to 4KiB"); in parse_amd_iommu_options()
3501 pr_info("Restricting V1 page-sizes to 4KiB/2MiB/1GiB"); in parse_amd_iommu_options()
3504 pr_notice("Unknown option - '%s'\n", str); in parse_amd_iommu_options()
3537 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", in parse_ivrs_ioapic()
3575 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", in parse_ivrs_hpet()
3678  * Since DTE[Mode]=0 is prohibited on an SNP-enabled system in amd_iommu_pasid_supported()
3708 return iommu->max_banks; in amd_iommu_pc_get_max_banks()
3723 return iommu->max_counters; in amd_iommu_pc_get_max_counters()
3736 return -ENODEV; in iommu_pc_get_set_reg()
3740 return -ENODEV; in iommu_pc_get_set_reg()
3745 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | in iommu_pc_get_set_reg()
3746 (iommu->max_counters << 8) | 0x28); in iommu_pc_get_set_reg()
3749 return -EINVAL; in iommu_pc_get_set_reg()
3754 writel((u32)val, iommu->mmio_base + offset); in iommu_pc_get_set_reg()
3755 writel((val >> 32), iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg()
3757 *value = readl(iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg()
3759 *value |= readl(iommu->mmio_base + offset); in iommu_pc_get_set_reg()
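
The performance-counter accesses above split each 64-bit register into two 32-bit MMIO words: writes store the low half and then the high half, and reads assemble the value from both halves. A sketch of the read side, assuming the same offset layout (the shift between the two reads is implied, not quoted from this listing):

    /* Sketch: rebuild the 64-bit counter from its high and low 32-bit halves. */
    u64 value;

    value  = (u64)readl(iommu->mmio_base + offset + 4) << 32;   /* high word */
    value |= readl(iommu->mmio_base + offset);                   /* low word  */
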
3769 return -EINVAL; in amd_iommu_pc_get_reg()
3777 return -EINVAL; in amd_iommu_pc_set_reg()
3803 return -EINVAL; in iommu_page_make_shared()
3847 ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE); in amd_iommu_snp_disable()
3851 ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE); in amd_iommu_snp_disable()
3855 ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE); in amd_iommu_snp_disable()