Lines Matching +full:pci +full:- +full:iommu

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
11 #include <linux/pci.h>
19 #include <linux/amd-iommu.h>
24 #include <asm/pci-direct.h>
25 #include <asm/iommu.h>
38 #include "../iommu-pages.h"
98 * structure describing one IOMMU in the ACPI table. Typically followed by one
118 * A device entry describing which devices a specific IOMMU translates and
136 * An AMD IOMMU memory definition structure. It defines things like exclusion
180 LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
187 /* IOMMUs have a non-present cache? */
232 bool translation_pre_enabled(struct amd_iommu *iommu) in translation_pre_enabled() argument
234 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
237 static void clear_translation_pre_enabled(struct amd_iommu *iommu) in clear_translation_pre_enabled() argument
239 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
242 static void init_translation_status(struct amd_iommu *iommu) in init_translation_status() argument
246 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in init_translation_status()
248 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
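
A minimal user-space sketch of the pre-enabled check above: it simply tests the IOMMU-enable bit in the value read back from the control register and latches the result into a software flag. The bit position and flag encoding here are placeholders for illustration; the real constants live in the driver headers.

#include <stdint.h>
#include <stdio.h>

#define CONTROL_IOMMU_EN                 0              /* assumed bit position */
#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0)       /* assumed flag encoding */

static unsigned int iommu_flags;

static void init_translation_status(uint64_t ctrl)
{
        /* Firmware (or a previous kernel) may have left translation enabled. */
        if (ctrl & (1ULL << CONTROL_IOMMU_EN))
                iommu_flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

int main(void)
{
        init_translation_status(0x1);   /* pretend MMIO_CONTROL read back with EN set */
        printf("pre-enabled: %d\n",
               !!(iommu_flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED));
        return 0;
}
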
267 struct amd_iommu *iommu; in get_global_efr() local
269 for_each_iommu(iommu) { in get_global_efr()
270 u64 tmp = iommu->features; in get_global_efr()
271 u64 tmp2 = iommu->features2; in get_global_efr()
273 if (list_is_first(&iommu->list, &amd_iommu_list)) { in get_global_efr()
284 …"Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n", in get_global_efr()
286 iommu->index, iommu->pci_seg->id, in get_global_efr()
287 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid), in get_global_efr()
288 PCI_FUNC(iommu->devid)); in get_global_efr()
300 * (i.e. before PCI init).
302 static void __init early_iommu_features_init(struct amd_iommu *iommu, in early_iommu_features_init() argument
306 iommu->features = h->efr_reg; in early_iommu_features_init()
307 iommu->features2 = h->efr_reg2; in early_iommu_features_init()
315 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) in iommu_read_l1() argument
319 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_read_l1()
320 pci_read_config_dword(iommu->dev, 0xfc, &val); in iommu_read_l1()
324 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) in iommu_write_l1() argument
326 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); in iommu_write_l1()
327 pci_write_config_dword(iommu->dev, 0xfc, val); in iommu_write_l1()
328 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_write_l1()
331 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) in iommu_read_l2() argument
335 pci_write_config_dword(iommu->dev, 0xf0, address); in iommu_read_l2()
336 pci_read_config_dword(iommu->dev, 0xf4, &val); in iommu_read_l2()
340 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) in iommu_write_l2() argument
342 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); in iommu_write_l2()
343 pci_write_config_dword(iommu->dev, 0xf4, val); in iommu_write_l2()
348 * AMD IOMMU MMIO register space handling functions
350 * These functions are used to program the IOMMU device registers in
356 * This function sets the exclusion range in the IOMMU. DMA accesses to the
359 static void iommu_set_exclusion_range(struct amd_iommu *iommu) in iommu_set_exclusion_range() argument
361 u64 start = iommu->exclusion_start & PAGE_MASK; in iommu_set_exclusion_range()
362 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; in iommu_set_exclusion_range()
365 if (!iommu->exclusion_start) in iommu_set_exclusion_range()
369 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_exclusion_range()
373 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_exclusion_range()
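
The base/limit pair written above is pure arithmetic on the configured exclusion range: both values are page-aligned, and the limit points at the last byte of the range. A standalone sketch of that computation, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        uint64_t exclusion_start  = 0x12345678ULL;      /* hypothetical values */
        uint64_t exclusion_length = 0x20000ULL;         /* 128 KiB */

        uint64_t start = exclusion_start & PAGE_MASK;
        uint64_t limit = (start + exclusion_length - 1) & PAGE_MASK;

        /* These are the raw values programmed into MMIO_EXCL_BASE/LIMIT. */
        printf("base  = %#llx\nlimit = %#llx\n",
               (unsigned long long)start, (unsigned long long)limit);
        return 0;
}
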
377 static void iommu_set_cwwb_range(struct amd_iommu *iommu) in iommu_set_cwwb_range() argument
379 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem); in iommu_set_cwwb_range()
386 * Re-purpose Exclusion base/limit registers for Completion wait in iommu_set_cwwb_range()
387 * write-back base/limit. in iommu_set_cwwb_range()
389 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_cwwb_range()
396 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_cwwb_range()
400 /* Programs the physical address of the device table into the IOMMU hardware */
401 static void iommu_set_device_table(struct amd_iommu *iommu) in iommu_set_device_table() argument
404 u32 dev_table_size = iommu->pci_seg->dev_table_size; in iommu_set_device_table()
405 void *dev_table = (void *)get_dev_table(iommu); in iommu_set_device_table()
407 BUG_ON(iommu->mmio_base == NULL); in iommu_set_device_table()
413 entry |= (dev_table_size >> 12) - 1; in iommu_set_device_table()
414 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, in iommu_set_device_table()
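
The register value built just above encodes both the physical address of the device table and its size: the low bits carry (size / 4 KiB) - 1, so a full 2 MiB table (64K device IDs times 32-byte entries) is encoded as 0x1ff. A quick sketch with a made-up physical address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dev_table_phys = 0x100000000ULL;       /* hypothetical address */
        uint32_t dev_table_size = 2 * 1024 * 1024;      /* 2 MiB */

        uint64_t entry = dev_table_phys;
        entry |= (dev_table_size >> 12) - 1;            /* size in 4K pages, minus one */

        printf("DEV_TABLE_BASE = %#llx\n", (unsigned long long)entry);
        return 0;
}
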
418 static void iommu_feature_set(struct amd_iommu *iommu, u64 val, u64 mask, u8 shift) in iommu_feature_set() argument
422 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_set()
426 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_set()
429 /* Generic functions to enable/disable certain features of the IOMMU. */
430 void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) in iommu_feature_enable() argument
432 iommu_feature_set(iommu, 1ULL, 1ULL, bit); in iommu_feature_enable()
435 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) in iommu_feature_disable() argument
437 iommu_feature_set(iommu, 0ULL, 1ULL, bit); in iommu_feature_disable()
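
iommu_feature_set() is a read-modify-write of the 64-bit control register, and enable/disable are its one-bit special cases. The middle of the function is not shown in this listing, but the effect is the standard mask-and-set pattern, sketched here over a plain variable instead of the MMIO register:

#include <stdint.h>
#include <stdio.h>

static uint64_t ctrl;   /* stands in for readq()/writeq() of MMIO_CONTROL_OFFSET */

static void feature_set(uint64_t val, uint64_t mask, uint8_t shift)
{
        ctrl &= ~(mask << shift);       /* clear the field */
        ctrl |= (val << shift);         /* write the new value */
}

static void feature_enable(uint8_t bit)  { feature_set(1ULL, 1ULL, bit); }
static void feature_disable(uint8_t bit) { feature_set(0ULL, 1ULL, bit); }

int main(void)
{
        feature_enable(0);              /* e.g. an enable bit at position 0 */
        feature_set(0x4, 0x7, 5);       /* e.g. a 3-bit field at an assumed shift */
        printf("ctrl = %#llx\n", (unsigned long long)ctrl);

        feature_disable(0);
        printf("ctrl = %#llx\n", (unsigned long long)ctrl);
        return 0;
}
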
441 static void iommu_enable(struct amd_iommu *iommu) in iommu_enable() argument
443 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); in iommu_enable()
446 static void iommu_disable(struct amd_iommu *iommu) in iommu_disable() argument
448 if (!iommu->mmio_base) in iommu_disable()
452 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in iommu_disable()
455 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); in iommu_disable()
456 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in iommu_disable()
458 /* Disable IOMMU GA_LOG */ in iommu_disable()
459 iommu_feature_disable(iommu, CONTROL_GALOG_EN); in iommu_disable()
460 iommu_feature_disable(iommu, CONTROL_GAINT_EN); in iommu_disable()
462 /* Disable IOMMU PPR logging */ in iommu_disable()
463 iommu_feature_disable(iommu, CONTROL_PPRLOG_EN); in iommu_disable()
464 iommu_feature_disable(iommu, CONTROL_PPRINT_EN); in iommu_disable()
466 /* Disable IOMMU hardware itself */ in iommu_disable()
467 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); in iommu_disable()
470 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); in iommu_disable()
474 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
480 pr_err("Can not reserve memory region %llx-%llx for mmio\n", in iommu_map_mmio_space()
489 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) in iommu_unmap_mmio_space() argument
491 if (iommu->mmio_base) in iommu_unmap_mmio_space()
492 iounmap(iommu->mmio_base); in iommu_unmap_mmio_space()
493 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); in iommu_unmap_mmio_space()
500 switch (h->type) { in get_ivhd_header_size()
514 * The functions below belong to the first pass of AMD IOMMU ACPI table
526 u32 type = ((struct ivhd_entry *)ivhd)->type; in ivhd_entry_length()
538 * After reading the highest device id from the IOMMU PCI capability header
545 int last_devid = -EINVAL; in find_last_devid_from_ivhd()
550 pr_err("Unsupported IVHD type %#x\n", h->type); in find_last_devid_from_ivhd()
551 return -EINVAL; in find_last_devid_from_ivhd()
555 end += h->length; in find_last_devid_from_ivhd()
559 switch (dev->type) { in find_last_devid_from_ivhd()
568 if (dev->devid > last_devid) in find_last_devid_from_ivhd()
569 last_devid = dev->devid; in find_last_devid_from_ivhd()
587 for (i = 0; i < table->length; ++i) in check_ivrs_checksum()
592 return -ENODEV; in check_ivrs_checksum()
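
check_ivrs_checksum() applies the usual ACPI rule: the byte-wise sum of the whole table, checksum byte included, must be zero modulo 256. A self-contained demonstration with a toy table (the checksum byte of an ACPI header sits at offset 9):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Valid ACPI tables sum to 0 (mod 256); the kernel returns -ENODEV otherwise. */
static int check_checksum(const uint8_t *table, uint32_t length)
{
        uint8_t sum = 0;

        for (uint32_t i = 0; i < length; ++i)
                sum += table[i];

        return sum == 0 ? 0 : -1;
}

int main(void)
{
        uint8_t table[16];
        uint8_t sum = 0;

        memset(table, 0, sizeof(table));
        memcpy(table, "IVRS", 4);               /* signature */
        table[4] = sizeof(table);               /* length fits in one byte here */

        for (unsigned int i = 0; i < sizeof(table); ++i)
                sum += table[i];
        table[9] = (uint8_t)(0x100 - sum);      /* fix up the checksum byte */

        printf("checksum ok: %s\n",
               check_checksum(table, sizeof(table)) == 0 ? "yes" : "no");
        return 0;
}
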
611 end += table->length; in find_last_devid_acpi()
614 if (h->pci_seg == pci_seg && in find_last_devid_acpi()
615 h->type == amd_iommu_target_ivhd_type) { in find_last_devid_acpi()
619 return -EINVAL; in find_last_devid_acpi()
623 p += h->length; in find_last_devid_acpi()
633 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
634 * data structures, initialize the per PCI segment device/alias/rlookup table
639 /* Allocate per PCI segment device table */
642 pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, in alloc_dev_table()
643 pci_seg->dev_table_size); in alloc_dev_table()
644 if (!pci_seg->dev_table) in alloc_dev_table()
645 return -ENOMEM; in alloc_dev_table()
653 memunmap((void *)pci_seg->dev_table); in free_dev_table()
655 iommu_free_pages(pci_seg->dev_table); in free_dev_table()
656 pci_seg->dev_table = NULL; in free_dev_table()
659 /* Allocate per PCI segment IOMMU rlookup table. */
662 pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1, in alloc_rlookup_table()
663 sizeof(*pci_seg->rlookup_table), in alloc_rlookup_table()
665 if (pci_seg->rlookup_table == NULL) in alloc_rlookup_table()
666 return -ENOMEM; in alloc_rlookup_table()
673 kvfree(pci_seg->rlookup_table); in free_rlookup_table()
674 pci_seg->rlookup_table = NULL; in free_rlookup_table()
679 pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1, in alloc_irq_lookup_table()
680 sizeof(*pci_seg->irq_lookup_table), in alloc_irq_lookup_table()
682 if (pci_seg->irq_lookup_table == NULL) in alloc_irq_lookup_table()
683 return -ENOMEM; in alloc_irq_lookup_table()
690 kvfree(pci_seg->irq_lookup_table); in free_irq_lookup_table()
691 pci_seg->irq_lookup_table = NULL; in free_irq_lookup_table()
698 pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1, in alloc_alias_table()
699 sizeof(*pci_seg->alias_table), in alloc_alias_table()
701 if (!pci_seg->alias_table) in alloc_alias_table()
702 return -ENOMEM; in alloc_alias_table()
707 for (i = 0; i <= pci_seg->last_bdf; ++i) in alloc_alias_table()
708 pci_seg->alias_table[i] = i; in alloc_alias_table()
715 kvfree(pci_seg->alias_table); in free_alias_table()
716 pci_seg->alias_table = NULL; in free_alias_table()
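
The alias table starts as an identity map: every device ID is its own DMA alias, and IVHD ALIAS entries later overwrite individual slots (see the alias handling in init_iommu_from_acpi() further down). A small sketch of that default-then-override pattern, with made-up device IDs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        uint16_t last_bdf = 0xff;       /* tiny segment, just for the example */
        uint16_t *alias_table = malloc((last_bdf + 1) * sizeof(*alias_table));

        if (!alias_table)
                return 1;

        /* Default: identity mapping, each devid aliases to itself. */
        for (unsigned int i = 0; i <= last_bdf; ++i)
                alias_table[i] = i;

        /* An IVHD ALIAS entry would then redirect one requester ID to another. */
        alias_table[0x28] = 0x20;       /* hypothetical devids */

        printf("alias of 0x28 -> %#x, alias of 0x30 -> %#x\n",
               (unsigned int)alias_table[0x28], (unsigned int)alias_table[0x30]);
        free(alias_table);
        return 0;
}
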
740 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
741 * write commands to that buffer later and the IOMMU will execute them
744 static int __init alloc_command_buffer(struct amd_iommu *iommu) in alloc_command_buffer() argument
746 iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE); in alloc_command_buffer()
748 return iommu->cmd_buf ? 0 : -ENOMEM; in alloc_command_buffer()
755 void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, in amd_iommu_restart_log() argument
761 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_restart_log()
765 pr_info_ratelimited("IOMMU %s log restarting\n", evt_type); in amd_iommu_restart_log()
767 iommu_feature_disable(iommu, cntrl_log); in amd_iommu_restart_log()
768 iommu_feature_disable(iommu, cntrl_intr); in amd_iommu_restart_log()
770 writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_restart_log()
772 iommu_feature_enable(iommu, cntrl_intr); in amd_iommu_restart_log()
773 iommu_feature_enable(iommu, cntrl_log); in amd_iommu_restart_log()
777 * This function restarts event logging in case the IOMMU experienced
780 void amd_iommu_restart_event_logging(struct amd_iommu *iommu) in amd_iommu_restart_event_logging() argument
782 amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN, in amd_iommu_restart_event_logging()
788 * This function restarts GA logging in case the IOMMU experienced
791 void amd_iommu_restart_ga_log(struct amd_iommu *iommu) in amd_iommu_restart_ga_log() argument
793 amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN, in amd_iommu_restart_ga_log()
799 * This function resets the command buffer if the IOMMU stopped fetching
802 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) in amd_iommu_reset_cmd_buffer() argument
804 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in amd_iommu_reset_cmd_buffer()
806 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); in amd_iommu_reset_cmd_buffer()
807 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in amd_iommu_reset_cmd_buffer()
808 iommu->cmd_buf_head = 0; in amd_iommu_reset_cmd_buffer()
809 iommu->cmd_buf_tail = 0; in amd_iommu_reset_cmd_buffer()
811 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); in amd_iommu_reset_cmd_buffer()
818 static void iommu_enable_command_buffer(struct amd_iommu *iommu) in iommu_enable_command_buffer() argument
822 BUG_ON(iommu->cmd_buf == NULL); in iommu_enable_command_buffer()
826 * Command buffer is re-used for kdump kernel and setting in iommu_enable_command_buffer()
829 entry = iommu_virt_to_phys(iommu->cmd_buf); in iommu_enable_command_buffer()
831 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, in iommu_enable_command_buffer()
835 amd_iommu_reset_cmd_buffer(iommu); in iommu_enable_command_buffer()
841 static void iommu_disable_command_buffer(struct amd_iommu *iommu) in iommu_disable_command_buffer() argument
843 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in iommu_disable_command_buffer()
846 static void __init free_command_buffer(struct amd_iommu *iommu) in free_command_buffer() argument
848 iommu_free_pages(iommu->cmd_buf); in free_command_buffer()
851 void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp, in iommu_alloc_4k_pages() argument
869 /* allocates the memory where the IOMMU will log its events */
870 static int __init alloc_event_buffer(struct amd_iommu *iommu) in alloc_event_buffer() argument
872 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL, in alloc_event_buffer()
875 return iommu->evt_buf ? 0 : -ENOMEM; in alloc_event_buffer()
878 static void iommu_enable_event_buffer(struct amd_iommu *iommu) in iommu_enable_event_buffer() argument
882 BUG_ON(iommu->evt_buf == NULL); in iommu_enable_event_buffer()
886 * Event buffer is re-used for kdump kernel and setting in iommu_enable_event_buffer()
889 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; in iommu_enable_event_buffer()
890 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, in iommu_enable_event_buffer()
895 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_enable_event_buffer()
896 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_enable_event_buffer()
898 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); in iommu_enable_event_buffer()
904 static void iommu_disable_event_buffer(struct amd_iommu *iommu) in iommu_disable_event_buffer() argument
906 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in iommu_disable_event_buffer()
909 static void __init free_event_buffer(struct amd_iommu *iommu) in free_event_buffer() argument
911 iommu_free_pages(iommu->evt_buf); in free_event_buffer()
914 static void free_ga_log(struct amd_iommu *iommu) in free_ga_log() argument
917 iommu_free_pages(iommu->ga_log); in free_ga_log()
918 iommu_free_pages(iommu->ga_log_tail); in free_ga_log()
923 static int iommu_ga_log_enable(struct amd_iommu *iommu) in iommu_ga_log_enable() argument
928 if (!iommu->ga_log) in iommu_ga_log_enable()
929 return -EINVAL; in iommu_ga_log_enable()
931 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; in iommu_ga_log_enable()
932 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, in iommu_ga_log_enable()
934 entry = (iommu_virt_to_phys(iommu->ga_log_tail) & in iommu_ga_log_enable()
935 (BIT_ULL(52)-1)) & ~7ULL; in iommu_ga_log_enable()
936 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, in iommu_ga_log_enable()
938 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_ga_log_enable()
939 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_ga_log_enable()
942 iommu_feature_enable(iommu, CONTROL_GAINT_EN); in iommu_ga_log_enable()
943 iommu_feature_enable(iommu, CONTROL_GALOG_EN); in iommu_ga_log_enable()
946 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in iommu_ga_log_enable()
953 return -EINVAL; in iommu_ga_log_enable()
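
The GA log tail pointer programmed above is a physical address restricted to bits 51:3: masking with BIT_ULL(52)-1 keeps it inside the 52-bit address field, and clearing the low three bits keeps it 8-byte aligned. The same arithmetic in isolation, with a made-up address:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
        uint64_t ga_log_tail_phys = 0x123456789abcdefULL;       /* hypothetical */

        /* Keep bits 51:3 only: 52-bit address space, 8-byte aligned pointer. */
        uint64_t entry = (ga_log_tail_phys & (BIT_ULL(52) - 1)) & ~7ULL;

        printf("GA log tail register value = %#llx\n", (unsigned long long)entry);
        return 0;
}
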
958 static int iommu_init_ga_log(struct amd_iommu *iommu) in iommu_init_ga_log() argument
963 iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE); in iommu_init_ga_log()
964 if (!iommu->ga_log) in iommu_init_ga_log()
967 iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8); in iommu_init_ga_log()
968 if (!iommu->ga_log_tail) in iommu_init_ga_log()
973 free_ga_log(iommu); in iommu_init_ga_log()
974 return -EINVAL; in iommu_init_ga_log()
978 static int __init alloc_cwwb_sem(struct amd_iommu *iommu) in alloc_cwwb_sem() argument
980 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1); in alloc_cwwb_sem()
981 if (!iommu->cmd_sem) in alloc_cwwb_sem()
982 return -ENOMEM; in alloc_cwwb_sem()
983 iommu->cmd_sem_paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in alloc_cwwb_sem()
987 static int __init remap_event_buffer(struct amd_iommu *iommu) in remap_event_buffer() argument
991 pr_info_once("Re-using event buffer from the previous kernel\n"); in remap_event_buffer()
992 paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK; in remap_event_buffer()
993 iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE); in remap_event_buffer()
995 return iommu->evt_buf ? 0 : -ENOMEM; in remap_event_buffer()
998 static int __init remap_command_buffer(struct amd_iommu *iommu) in remap_command_buffer() argument
1002 pr_info_once("Re-using command buffer from the previous kernel\n"); in remap_command_buffer()
1003 paddr = readq(iommu->mmio_base + MMIO_CMD_BUF_OFFSET) & PM_ADDR_MASK; in remap_command_buffer()
1004 iommu->cmd_buf = iommu_memremap(paddr, CMD_BUFFER_SIZE); in remap_command_buffer()
1006 return iommu->cmd_buf ? 0 : -ENOMEM; in remap_command_buffer()
1009 static int __init remap_or_alloc_cwwb_sem(struct amd_iommu *iommu) in remap_or_alloc_cwwb_sem() argument
1016 * completion wait buffer (CWB) address. Read and re-use it. in remap_or_alloc_cwwb_sem()
1018 pr_info_once("Re-using CWB buffers from the previous kernel\n"); in remap_or_alloc_cwwb_sem()
1019 paddr = readq(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK; in remap_or_alloc_cwwb_sem()
1020 iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE); in remap_or_alloc_cwwb_sem()
1021 if (!iommu->cmd_sem) in remap_or_alloc_cwwb_sem()
1022 return -ENOMEM; in remap_or_alloc_cwwb_sem()
1023 iommu->cmd_sem_paddr = paddr; in remap_or_alloc_cwwb_sem()
1025 return alloc_cwwb_sem(iommu); in remap_or_alloc_cwwb_sem()
1031 static int __init alloc_iommu_buffers(struct amd_iommu *iommu) in alloc_iommu_buffers() argument
1040 ret = remap_or_alloc_cwwb_sem(iommu); in alloc_iommu_buffers()
1044 ret = remap_command_buffer(iommu); in alloc_iommu_buffers()
1048 ret = remap_event_buffer(iommu); in alloc_iommu_buffers()
1052 ret = alloc_cwwb_sem(iommu); in alloc_iommu_buffers()
1056 ret = alloc_command_buffer(iommu); in alloc_iommu_buffers()
1060 ret = alloc_event_buffer(iommu); in alloc_iommu_buffers()
1068 static void __init free_cwwb_sem(struct amd_iommu *iommu) in free_cwwb_sem() argument
1070 if (iommu->cmd_sem) in free_cwwb_sem()
1071 iommu_free_pages((void *)iommu->cmd_sem); in free_cwwb_sem()
1073 static void __init unmap_cwwb_sem(struct amd_iommu *iommu) in unmap_cwwb_sem() argument
1075 if (iommu->cmd_sem) { in unmap_cwwb_sem()
1077 memunmap((void *)iommu->cmd_sem); in unmap_cwwb_sem()
1079 iommu_free_pages((void *)iommu->cmd_sem); in unmap_cwwb_sem()
1083 static void __init unmap_command_buffer(struct amd_iommu *iommu) in unmap_command_buffer() argument
1085 memunmap((void *)iommu->cmd_buf); in unmap_command_buffer()
1088 static void __init unmap_event_buffer(struct amd_iommu *iommu) in unmap_event_buffer() argument
1090 memunmap(iommu->evt_buf); in unmap_event_buffer()
1093 static void __init free_iommu_buffers(struct amd_iommu *iommu) in free_iommu_buffers() argument
1096 unmap_cwwb_sem(iommu); in free_iommu_buffers()
1097 unmap_command_buffer(iommu); in free_iommu_buffers()
1098 unmap_event_buffer(iommu); in free_iommu_buffers()
1100 free_cwwb_sem(iommu); in free_iommu_buffers()
1101 free_command_buffer(iommu); in free_iommu_buffers()
1102 free_event_buffer(iommu); in free_iommu_buffers()
1106 static void iommu_enable_xt(struct amd_iommu *iommu) in iommu_enable_xt() argument
1110 * XT mode (32-bit APIC destination ID) requires in iommu_enable_xt()
1111 * GA mode (128-bit IRTE support) as a prerequisite. in iommu_enable_xt()
1115 iommu_feature_enable(iommu, CONTROL_XT_EN); in iommu_enable_xt()
1119 static void iommu_enable_gt(struct amd_iommu *iommu) in iommu_enable_gt() argument
1124 iommu_feature_enable(iommu, CONTROL_GT_EN); in iommu_enable_gt()
1133 dte->data[i] |= (1UL << _bit); in set_dte_bit()
1136 static bool __reuse_device_table(struct amd_iommu *iommu) in __reuse_device_table() argument
1138 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in __reuse_device_table()
1143 /* Each IOMMU uses a separate device table with the same size */ in __reuse_device_table()
1144 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); in __reuse_device_table()
1145 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); in __reuse_device_table()
1149 if (old_devtb_size != pci_seg->dev_table_size) { in __reuse_device_table()
1150 pr_err("The device table size of IOMMU:%d is not expected!\n", in __reuse_device_table()
1151 iommu->index); in __reuse_device_table()
1168 * Re-use the previous kernel's device table for kdump. in __reuse_device_table()
1170 pci_seg->old_dev_tbl_cpy = iommu_memremap(old_devtb_phys, pci_seg->dev_table_size); in __reuse_device_table()
1171 if (pci_seg->old_dev_tbl_cpy == NULL) { in __reuse_device_table()
1181 struct amd_iommu *iommu; in reuse_device_table() local
1187 pr_warn("Translation is already enabled - trying to reuse translation structures\n"); in reuse_device_table()
1190 * All IOMMUs within a PCI segment share a common device table. in reuse_device_table()
1191 * Hence, reuse the device table only once per PCI segment. in reuse_device_table()
1194 for_each_iommu(iommu) { in reuse_device_table()
1195 if (pci_seg->id != iommu->pci_seg->id) in reuse_device_table()
1197 if (!__reuse_device_table(iommu)) in reuse_device_table()
1217 if ((e->segid == segid) && in amd_iommu_get_ivhd_dte_flags()
1218 (e->devid_first <= devid) && (devid <= e->devid_last)) { in amd_iommu_get_ivhd_dte_flags()
1219 unsigned int len = e->devid_last - e->devid_first; in amd_iommu_get_ivhd_dte_flags()
1222 dte = &(e->dte); in amd_iommu_get_ivhd_dte_flags()
1235 if ((e->segid == segid) && in search_ivhd_dte_flags()
1236 (e->devid_first == first) && in search_ivhd_dte_flags()
1237 (e->devid_last == last)) in search_ivhd_dte_flags()
1248 set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last, in set_dev_entry_from_acpi_range() argument
1258 if (search_ivhd_dte_flags(iommu->pci_seg->id, first, last)) in set_dev_entry_from_acpi_range()
1286 memcpy(&d->dte, &dte, sizeof(dte)); in set_dev_entry_from_acpi_range()
1287 d->segid = iommu->pci_seg->id; in set_dev_entry_from_acpi_range()
1288 d->devid_first = first; in set_dev_entry_from_acpi_range()
1289 d->devid_last = last; in set_dev_entry_from_acpi_range()
1290 list_add_tail(&d->list, &amd_ivhd_dev_flags_list); in set_dev_entry_from_acpi_range()
1295 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dev_entry_from_acpi_range()
1299 amd_iommu_set_rlookup_table(iommu, i); in set_dev_entry_from_acpi_range()
1303 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, in set_dev_entry_from_acpi() argument
1306 set_dev_entry_from_acpi_range(iommu, devid, devid, flags, ext_flags); in set_dev_entry_from_acpi()
1319 return -EINVAL; in add_special_device()
1322 if (!(entry->id == id && entry->cmd_line)) in add_special_device()
1325 pr_info("Command-line override present for %s id %d - ignoring\n", in add_special_device()
1328 *devid = entry->devid; in add_special_device()
1335 return -ENOMEM; in add_special_device()
1337 entry->id = id; in add_special_device()
1338 entry->devid = *devid; in add_special_device()
1339 entry->cmd_line = cmd_line; in add_special_device()
1341 list_add_tail(&entry->list, list); in add_special_device()
1353 if (strcmp(entry->hid, hid) || in add_acpi_hid_device()
1354 (*uid && *entry->uid && strcmp(entry->uid, uid)) || in add_acpi_hid_device()
1355 !entry->cmd_line) in add_acpi_hid_device()
1358 pr_info("Command-line override for hid:%s uid:%s\n", in add_acpi_hid_device()
1360 *devid = entry->devid; in add_acpi_hid_device()
1366 return -ENOMEM; in add_acpi_hid_device()
1368 memcpy(entry->uid, uid, strlen(uid)); in add_acpi_hid_device()
1369 memcpy(entry->hid, hid, strlen(hid)); in add_acpi_hid_device()
1370 entry->devid = *devid; in add_acpi_hid_device()
1371 entry->cmd_line = cmd_line; in add_acpi_hid_device()
1372 entry->root_devid = (entry->devid & (~0x7)); in add_acpi_hid_device()
1375 entry->cmd_line ? "cmd" : "ivrs", in add_acpi_hid_device()
1376 entry->hid, entry->uid, entry->root_devid); in add_acpi_hid_device()
1378 list_add_tail(&entry->list, list); in add_acpi_hid_device()
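
For ACPI HID devices the driver also records a "root" device ID, which is the same bus/device with the PCI function number (the low three bits of a BDF-style devid) cleared. A quick illustration with a hypothetical devid:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t devid = 0x00a5;                /* bus 0x00, dev 0x14, fn 5 */
        uint16_t root_devid = devid & ~0x7;     /* function 0 of the same slot */

        printf("devid %#06x -> root devid %#06x (bus %u, dev %u, fn %u)\n",
               (unsigned int)devid, (unsigned int)root_devid,
               devid >> 8, (devid >> 3) & 0x1f, devid & 0x7);
        return 0;
}
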
1417 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1420 static int __init init_iommu_from_acpi(struct amd_iommu *iommu, in init_iommu_from_acpi() argument
1429 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in init_iommu_from_acpi()
1443 iommu->acpi_flags = h->flags; in init_iommu_from_acpi()
1450 pr_err("Unsupported IVHD type %#x\n", h->type); in init_iommu_from_acpi()
1451 return -EINVAL; in init_iommu_from_acpi()
1456 end += h->length; in init_iommu_from_acpi()
1461 seg_id = pci_seg->id; in init_iommu_from_acpi()
1463 switch (e->type) { in init_iommu_from_acpi()
1466 DUMP_printk(" DEV_ALL\t\t\tsetting: %#02x\n", e->flags); in init_iommu_from_acpi()
1467 set_dev_entry_from_acpi_range(iommu, 0, pci_seg->last_bdf, e->flags, 0); in init_iommu_from_acpi()
1472 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1473 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1474 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1475 e->flags); in init_iommu_from_acpi()
1477 devid = e->devid; in init_iommu_from_acpi()
1478 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1483 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1484 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1485 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1486 e->flags); in init_iommu_from_acpi()
1488 devid_start = e->devid; in init_iommu_from_acpi()
1489 flags = e->flags; in init_iommu_from_acpi()
1496 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1497 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1498 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1499 e->flags, in init_iommu_from_acpi()
1500 PCI_BUS_NUM(e->ext >> 8), in init_iommu_from_acpi()
1501 PCI_SLOT(e->ext >> 8), in init_iommu_from_acpi()
1502 PCI_FUNC(e->ext >> 8)); in init_iommu_from_acpi()
1504 devid = e->devid; in init_iommu_from_acpi()
1505 devid_to = e->ext >> 8; in init_iommu_from_acpi()
1506 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); in init_iommu_from_acpi()
1507 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); in init_iommu_from_acpi()
1508 pci_seg->alias_table[devid] = devid_to; in init_iommu_from_acpi()
1513 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1514 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1515 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1516 e->flags, in init_iommu_from_acpi()
1517 seg_id, PCI_BUS_NUM(e->ext >> 8), in init_iommu_from_acpi()
1518 PCI_SLOT(e->ext >> 8), in init_iommu_from_acpi()
1519 PCI_FUNC(e->ext >> 8)); in init_iommu_from_acpi()
1521 devid_start = e->devid; in init_iommu_from_acpi()
1522 flags = e->flags; in init_iommu_from_acpi()
1523 devid_to = e->ext >> 8; in init_iommu_from_acpi()
1530 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1531 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1532 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1533 e->flags, e->ext); in init_iommu_from_acpi()
1535 devid = e->devid; in init_iommu_from_acpi()
1536 set_dev_entry_from_acpi(iommu, devid, e->flags, in init_iommu_from_acpi()
1537 e->ext); in init_iommu_from_acpi()
1542 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1543 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1544 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1545 e->flags, e->ext); in init_iommu_from_acpi()
1547 devid_start = e->devid; in init_iommu_from_acpi()
1548 flags = e->flags; in init_iommu_from_acpi()
1549 ext_flags = e->ext; in init_iommu_from_acpi()
1555 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1556 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1557 PCI_FUNC(e->devid)); in init_iommu_from_acpi()
1559 devid = e->devid; in init_iommu_from_acpi()
1562 pci_seg->alias_table[dev_i] = devid_to; in init_iommu_from_acpi()
1563 set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); in init_iommu_from_acpi()
1565 set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags); in init_iommu_from_acpi()
1573 handle = e->ext & 0xff; in init_iommu_from_acpi()
1574 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8)); in init_iommu_from_acpi()
1575 type = (e->ext >> 24) & 0xff; in init_iommu_from_acpi()
1589 e->flags); in init_iommu_from_acpi()
1597 * command-line override is present. So call in init_iommu_from_acpi()
1600 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1610 if (h->type != 0x40) { in init_iommu_from_acpi()
1612 e->type); in init_iommu_from_acpi()
1616 BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1); in init_iommu_from_acpi()
1617 memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1); in init_iommu_from_acpi()
1618 hid[ACPIHID_HID_LEN - 1] = '\0'; in init_iommu_from_acpi()
1626 switch (e->uidf) { in init_iommu_from_acpi()
1629 if (e->uidl != 0) in init_iommu_from_acpi()
1635 sprintf(uid, "%d", e->uid); in init_iommu_from_acpi()
1640 memcpy(uid, &e->uid, e->uidl); in init_iommu_from_acpi()
1641 uid[e->uidl] = '\0'; in init_iommu_from_acpi()
1648 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid); in init_iommu_from_acpi()
1654 e->flags); in init_iommu_from_acpi()
1656 flags = e->flags; in init_iommu_from_acpi()
1664 * command-line override is present. So call in init_iommu_from_acpi()
1667 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1681 /* Allocate PCI segment data structure */
1690 * handle in this PCI segment. Based on this information, the shared data in alloc_pci_segment()
1691 * structures for the PCI segments in the system will be allocated. in alloc_pci_segment()
1701 pci_seg->last_bdf = last_bdf; in alloc_pci_segment()
1702 DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf); in alloc_pci_segment()
1703 pci_seg->dev_table_size = in alloc_pci_segment()
1707 pci_seg->id = id; in alloc_pci_segment()
1708 init_llist_head(&pci_seg->dev_data_list); in alloc_pci_segment()
1709 INIT_LIST_HEAD(&pci_seg->unity_map); in alloc_pci_segment()
1710 list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list); in alloc_pci_segment()
1726 list_del(&pci_seg->list); in alloc_pci_segment()
1737 if (pci_seg->id == id) in get_pci_segment()
1749 list_del(&pci_seg->list); in free_pci_segments()
1758 static void __init free_sysfs(struct amd_iommu *iommu) in free_sysfs() argument
1760 if (iommu->iommu.dev) { in free_sysfs()
1761 iommu_device_unregister(&iommu->iommu); in free_sysfs()
1762 iommu_device_sysfs_remove(&iommu->iommu); in free_sysfs()
1766 static void __init free_iommu_one(struct amd_iommu *iommu) in free_iommu_one() argument
1768 free_sysfs(iommu); in free_iommu_one()
1769 free_iommu_buffers(iommu); in free_iommu_one()
1770 amd_iommu_free_ppr_log(iommu); in free_iommu_one()
1771 free_ga_log(iommu); in free_iommu_one()
1772 iommu_unmap_mmio_space(iommu); in free_iommu_one()
1773 amd_iommu_iopf_uninit(iommu); in free_iommu_one()
1778 struct amd_iommu *iommu, *next; in free_iommu_all() local
1780 for_each_iommu_safe(iommu, next) { in free_iommu_all()
1781 list_del(&iommu->list); in free_iommu_all()
1782 free_iommu_one(iommu); in free_iommu_all()
1783 kfree(iommu); in free_iommu_all()
1788 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1793 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) in amd_iommu_erratum_746_workaround() argument
1802 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1803 pci_read_config_dword(iommu->dev, 0xf4, &value); in amd_iommu_erratum_746_workaround()
1809 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); in amd_iommu_erratum_746_workaround()
1811 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); in amd_iommu_erratum_746_workaround()
1812 pci_info(iommu->dev, "Applying erratum 746 workaround\n"); in amd_iommu_erratum_746_workaround()
1815 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1819 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1824 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) in amd_iommu_ats_write_check_workaround() argument
1834 value = iommu_read_l2(iommu, 0x47); in amd_iommu_ats_write_check_workaround()
1840 iommu_write_l2(iommu, 0x47, value | BIT(0)); in amd_iommu_ats_write_check_workaround()
1842 pci_info(iommu->dev, "Applying ATS write check workaround\n"); in amd_iommu_ats_write_check_workaround()
1846 * This function glues the initialization function for one IOMMU
1848 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1850 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h, in init_iommu_one() argument
1855 pci_seg = get_pci_segment(h->pci_seg, ivrs_base); in init_iommu_one()
1857 return -ENOMEM; in init_iommu_one()
1858 iommu->pci_seg = pci_seg; in init_iommu_one()
1860 raw_spin_lock_init(&iommu->lock); in init_iommu_one()
1861 atomic64_set(&iommu->cmd_sem_val, 0); in init_iommu_one()
1863 /* Add IOMMU to internal data structures */ in init_iommu_one()
1864 list_add_tail(&iommu->list, &amd_iommu_list); in init_iommu_one()
1865 iommu->index = amd_iommus_present++; in init_iommu_one()
1867 if (unlikely(iommu->index >= MAX_IOMMUS)) { in init_iommu_one()
1869 return -ENOSYS; in init_iommu_one()
1873 * Copy data from ACPI table entry to the iommu struct in init_iommu_one()
1875 iommu->devid = h->devid; in init_iommu_one()
1876 iommu->cap_ptr = h->cap_ptr; in init_iommu_one()
1877 iommu->mmio_phys = h->mmio_phys; in init_iommu_one()
1879 switch (h->type) { in init_iommu_one()
1882 if ((h->efr_attr != 0) && in init_iommu_one()
1883 ((h->efr_attr & (0xF << 13)) != 0) && in init_iommu_one()
1884 ((h->efr_attr & (0x3F << 17)) != 0)) in init_iommu_one()
1885 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1887 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1890 if ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0) in init_iommu_one()
1895 if (h->efr_reg & (1 << 9)) in init_iommu_one()
1896 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1898 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1901 if ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0) { in init_iommu_one()
1906 if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) in init_iommu_one()
1909 if (h->efr_attr & BIT(IOMMU_IVHD_ATTR_HATDIS_SHIFT)) { in init_iommu_one()
1914 early_iommu_features_init(iommu, h); in init_iommu_one()
1918 return -EINVAL; in init_iommu_one()
1921 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, in init_iommu_one()
1922 iommu->mmio_phys_end); in init_iommu_one()
1923 if (!iommu->mmio_base) in init_iommu_one()
1924 return -ENOMEM; in init_iommu_one()
1926 return init_iommu_from_acpi(iommu, h); in init_iommu_one()
1929 static int __init init_iommu_one_late(struct amd_iommu *iommu) in init_iommu_one_late() argument
1933 ret = alloc_iommu_buffers(iommu); in init_iommu_one_late()
1937 iommu->int_enabled = false; in init_iommu_one_late()
1939 init_translation_status(iommu); in init_iommu_one_late()
1940 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_iommu_one_late()
1941 iommu_disable(iommu); in init_iommu_one_late()
1942 clear_translation_pre_enabled(iommu); in init_iommu_one_late()
1943 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n", in init_iommu_one_late()
1944 iommu->index); in init_iommu_one_late()
1947 amd_iommu_pre_enabled = translation_pre_enabled(iommu); in init_iommu_one_late()
1950 ret = amd_iommu_create_irq_domain(iommu); in init_iommu_one_late()
1956 * Make sure IOMMU is not considered to translate itself. The IVRS in init_iommu_one_late()
1959 iommu->pci_seg->rlookup_table[iommu->devid] = NULL; in init_iommu_one_late()
1965 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1975 u8 last_type = ivhd->type; in get_highest_supported_ivhd_type()
1976 u16 devid = ivhd->devid; in get_highest_supported_ivhd_type()
1978 while (((u8 *)ivhd - base < ivrs->length) && in get_highest_supported_ivhd_type()
1979 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) { in get_highest_supported_ivhd_type()
1982 if (ivhd->devid == devid) in get_highest_supported_ivhd_type()
1983 last_type = ivhd->type; in get_highest_supported_ivhd_type()
1984 ivhd = (struct ivhd_header *)(p + ivhd->length); in get_highest_supported_ivhd_type()
1991 * Iterates over all IOMMU entries in the ACPI table, allocates the
1992 * IOMMU structure and initializes it with init_iommu_one()
1998 struct amd_iommu *iommu; in init_iommu_all() local
2001 end += table->length; in init_iommu_all()
2011 h->pci_seg, PCI_BUS_NUM(h->devid), in init_iommu_all()
2012 PCI_SLOT(h->devid), PCI_FUNC(h->devid), in init_iommu_all()
2013 h->cap_ptr, h->flags, h->info); in init_iommu_all()
2014 DUMP_printk(" mmio-addr: %016llx\n", in init_iommu_all()
2015 h->mmio_phys); in init_iommu_all()
2017 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); in init_iommu_all()
2018 if (iommu == NULL) in init_iommu_all()
2019 return -ENOMEM; in init_iommu_all()
2021 ret = init_iommu_one(iommu, h, table); in init_iommu_all()
2025 p += h->length; in init_iommu_all()
2033 /* Phase 3: Enabling IOMMU features */ in init_iommu_all()
2034 for_each_iommu(iommu) { in init_iommu_all()
2035 ret = init_iommu_one_late(iommu); in init_iommu_all()
2043 static void init_iommu_perf_ctr(struct amd_iommu *iommu) in init_iommu_perf_ctr() argument
2046 struct pci_dev *pdev = iommu->dev; in init_iommu_perf_ctr()
2053 pci_info(pdev, "IOMMU performance counters supported\n"); in init_iommu_perf_ctr()
2055 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); in init_iommu_perf_ctr()
2056 iommu->max_banks = (u8) ((val >> 12) & 0x3f); in init_iommu_perf_ctr()
2057 iommu->max_counters = (u8) ((val >> 7) & 0xf); in init_iommu_perf_ctr()
2066 struct amd_iommu *iommu = dev_to_amd_iommu(dev); in amd_iommu_show_cap() local
2067 return sysfs_emit(buf, "%x\n", iommu->cap); in amd_iommu_show_cap()
2086 .name = "amd-iommu",
2097 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
2098 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
2100 static void __init late_iommu_features_init(struct amd_iommu *iommu) in late_iommu_features_init() argument
2104 if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) in late_iommu_features_init()
2108 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); in late_iommu_features_init()
2109 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2); in late_iommu_features_init()
2130 static int __init iommu_init_pci(struct amd_iommu *iommu) in iommu_init_pci() argument
2132 int cap_ptr = iommu->cap_ptr; in iommu_init_pci()
2135 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, in iommu_init_pci()
2136 PCI_BUS_NUM(iommu->devid), in iommu_init_pci()
2137 iommu->devid & 0xff); in iommu_init_pci()
2138 if (!iommu->dev) in iommu_init_pci()
2139 return -ENODEV; in iommu_init_pci()
2141 /* ACPI _PRT won't have an IRQ for IOMMU */ in iommu_init_pci()
2142 iommu->dev->irq_managed = 1; in iommu_init_pci()
2144 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, in iommu_init_pci()
2145 &iommu->cap); in iommu_init_pci()
2147 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) in iommu_init_pci()
2150 late_iommu_features_init(iommu); in iommu_init_pci()
2157 iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1; in iommu_init_pci()
2159 BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK); in iommu_init_pci()
2163 if (amd_iommu_max_glx_val == -1) in iommu_init_pci()
2168 iommu_enable_gt(iommu); in iommu_init_pci()
2171 if (check_feature(FEATURE_PPR) && amd_iommu_alloc_ppr_log(iommu)) in iommu_init_pci()
2172 return -ENOMEM; in iommu_init_pci()
2174 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { in iommu_init_pci()
2180 init_iommu_perf_ctr(iommu); in iommu_init_pci()
2182 if (is_rd890_iommu(iommu->dev)) { in iommu_init_pci()
2185 iommu->root_pdev = in iommu_init_pci()
2186 pci_get_domain_bus_and_slot(iommu->pci_seg->id, in iommu_init_pci()
2187 iommu->dev->bus->number, in iommu_init_pci()
2195 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_init_pci()
2196 &iommu->stored_addr_lo); in iommu_init_pci()
2197 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_init_pci()
2198 &iommu->stored_addr_hi); in iommu_init_pci()
2201 iommu->stored_addr_lo &= ~1; in iommu_init_pci()
2205 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); in iommu_init_pci()
2208 iommu->stored_l2[i] = iommu_read_l2(iommu, i); in iommu_init_pci()
2211 amd_iommu_erratum_746_workaround(iommu); in iommu_init_pci()
2212 amd_iommu_ats_write_check_workaround(iommu); in iommu_init_pci()
2214 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, in iommu_init_pci()
2215 amd_iommu_groups, "ivhd%d", iommu->index); in iommu_init_pci()
2220 * Allocate per IOMMU IOPF queue here so that in attach device path, in iommu_init_pci()
2224 ret = amd_iommu_iopf_init(iommu); in iommu_init_pci()
2229 ret = iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); in iommu_init_pci()
2233 * IOMMU. Do not return an error to enable IRQ remapping in iommu_init_pci()
2236 iommu_device_sysfs_remove(&iommu->iommu); in iommu_init_pci()
2239 return pci_enable_device(iommu->dev); in iommu_init_pci()
2265 pr_cont(" SEV-TIO"); in print_iommu_info()
2283 struct amd_iommu *iommu; in amd_iommu_init_pci() local
2287 /* Init global identity domain before registering IOMMU */ in amd_iommu_init_pci()
2290 for_each_iommu(iommu) { in amd_iommu_init_pci()
2291 ret = iommu_init_pci(iommu); in amd_iommu_init_pci()
2293 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n", in amd_iommu_init_pci()
2294 iommu->index, ret); in amd_iommu_init_pci()
2297 /* Need to set up the CWWB range after PCI init */ in amd_iommu_init_pci()
2298 iommu_set_cwwb_range(iommu); in amd_iommu_init_pci()
2314 for_each_iommu(iommu) in amd_iommu_init_pci()
2315 amd_iommu_flush_all_caches(iommu); in amd_iommu_init_pci()
2327 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
2332 static int iommu_setup_msi(struct amd_iommu *iommu) in iommu_setup_msi() argument
2336 r = pci_enable_msi(iommu->dev); in iommu_setup_msi()
2340 r = request_threaded_irq(iommu->dev->irq, in iommu_setup_msi()
2343 0, "AMD-Vi", in iommu_setup_msi()
2344 iommu); in iommu_setup_msi()
2347 pci_disable_msi(iommu->dev); in iommu_setup_msi()
2388 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI) in intcapxt_irqdomain_alloc()
2389 return -EINVAL; in intcapxt_irqdomain_alloc()
2398 irqd->chip = &intcapxt_controller; in intcapxt_irqdomain_alloc()
2399 irqd->hwirq = info->hwirq; in intcapxt_irqdomain_alloc()
2400 irqd->chip_data = info->data; in intcapxt_irqdomain_alloc()
2416 struct amd_iommu *iommu = irqd->chip_data; in intcapxt_unmask_irq() local
2421 xt.dest_mode_logical = apic->dest_mode_logical; in intcapxt_unmask_irq()
2422 xt.vector = cfg->vector; in intcapxt_unmask_irq()
2423 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); in intcapxt_unmask_irq()
2424 xt.destid_24_31 = cfg->dest_apicid >> 24; in intcapxt_unmask_irq()
2426 writeq(xt.capxt, iommu->mmio_base + irqd->hwirq); in intcapxt_unmask_irq()
2431 struct amd_iommu *iommu = irqd->chip_data; in intcapxt_mask_irq() local
2433 writeq(0, iommu->mmio_base + irqd->hwirq); in intcapxt_mask_irq()
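
With XT (x2APIC) support the destination APIC ID no longer fits in eight bits, so the capability register splits it into a 24-bit low field and an 8-bit high field, as the unmask path above shows. The split on its own, with a hypothetical destination ID:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)  ((~0U << (l)) & (~0U >> (31 - (h))))

int main(void)
{
        uint32_t dest_apicid = 0x01020304;      /* hypothetical x2APIC destination */

        uint32_t destid_0_23  = dest_apicid & GENMASK(23, 0);
        uint32_t destid_24_31 = dest_apicid >> 24;

        /* Both fields are packed into the INTCAPXT register along with the vector. */
        printf("destid[23:0] = %#x, destid[31:24] = %#x\n",
               destid_0_23, destid_24_31);
        return 0;
}
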
2440 struct irq_data *parent = irqd->parent_data; in intcapxt_set_affinity()
2443 ret = parent->chip->irq_set_affinity(parent, mask, force); in intcapxt_set_affinity()
2451 return on ? -EOPNOTSUPP : 0; in intcapxt_set_wake()
2455 .name = "IOMMU-MSI",
2479 /* No need for locking here (yet) as the init is single-threaded */ in iommu_get_irqdomain()
2483 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI"); in iommu_get_irqdomain()
2496 static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname, in __iommu_setup_intcapxt() argument
2502 int node = dev_to_node(&iommu->dev->dev); in __iommu_setup_intcapxt()
2506 return -ENXIO; in __iommu_setup_intcapxt()
2510 info.data = iommu; in __iommu_setup_intcapxt()
2520 thread_fn, 0, devname, iommu); in __iommu_setup_intcapxt()
2530 static int iommu_setup_intcapxt(struct amd_iommu *iommu) in iommu_setup_intcapxt() argument
2534 snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name), in iommu_setup_intcapxt()
2535 "AMD-Vi%d-Evt", iommu->index); in iommu_setup_intcapxt()
2536 ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name, in iommu_setup_intcapxt()
2542 snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name), in iommu_setup_intcapxt()
2543 "AMD-Vi%d-PPR", iommu->index); in iommu_setup_intcapxt()
2544 ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name, in iommu_setup_intcapxt()
2551 snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name), in iommu_setup_intcapxt()
2552 "AMD-Vi%d-GA", iommu->index); in iommu_setup_intcapxt()
2553 ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name, in iommu_setup_intcapxt()
2561 static int iommu_init_irq(struct amd_iommu *iommu) in iommu_init_irq() argument
2565 if (iommu->int_enabled) in iommu_init_irq()
2569 ret = iommu_setup_intcapxt(iommu); in iommu_init_irq()
2570 else if (iommu->dev->msi_cap) in iommu_init_irq()
2571 ret = iommu_setup_msi(iommu); in iommu_init_irq()
2573 ret = -ENODEV; in iommu_init_irq()
2578 iommu->int_enabled = true; in iommu_init_irq()
2582 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); in iommu_init_irq()
2584 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); in iommu_init_irq()
2603 list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) { in free_unity_maps()
2604 list_del(&entry->list); in free_unity_maps()
2618 pci_seg = get_pci_segment(m->pci_seg, ivrs_base); in init_unity_map_range()
2620 return -ENOMEM; in init_unity_map_range()
2624 return -ENOMEM; in init_unity_map_range()
2626 switch (m->type) { in init_unity_map_range()
2632 e->devid_start = e->devid_end = m->devid; in init_unity_map_range()
2636 e->devid_start = 0; in init_unity_map_range()
2637 e->devid_end = pci_seg->last_bdf; in init_unity_map_range()
2641 e->devid_start = m->devid; in init_unity_map_range()
2642 e->devid_end = m->aux; in init_unity_map_range()
2645 e->address_start = PAGE_ALIGN(m->range_start); in init_unity_map_range()
2646 e->address_end = e->address_start + PAGE_ALIGN(m->range_length); in init_unity_map_range()
2647 e->prot = m->flags >> 1; in init_unity_map_range()
2650 * Treat per-device exclusion ranges as r/w unity-mapped regions in init_unity_map_range()
2656 if (m->flags & IVMD_FLAG_EXCL_RANGE) in init_unity_map_range()
2657 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; in init_unity_map_range()
2661 " flags: %x\n", s, m->pci_seg, in init_unity_map_range()
2662 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), in init_unity_map_range()
2663 PCI_FUNC(e->devid_start), m->pci_seg, in init_unity_map_range()
2664 PCI_BUS_NUM(e->devid_end), in init_unity_map_range()
2665 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), in init_unity_map_range()
2666 e->address_start, e->address_end, m->flags); in init_unity_map_range()
2668 list_add_tail(&e->list, &pci_seg->unity_map); in init_unity_map_range()
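
Each IVMD entry becomes a unity-map range: the start and length are rounded up to page boundaries, and the IR/IW flags shift down by one to form the prot bits (with exclusion ranges forced to read/write, as noted above). A sketch of that translation; the flag values are placeholders for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       4096ULL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

#define IVMD_FLAG_IR    0x02    /* assumed bit values, for illustration only */
#define IVMD_FLAG_IW    0x04

int main(void)
{
        uint64_t range_start  = 0x7f123456ULL;  /* hypothetical IVMD fields */
        uint64_t range_length = 0x3000ULL;
        uint8_t  flags        = IVMD_FLAG_IR | IVMD_FLAG_IW;

        uint64_t address_start = PAGE_ALIGN(range_start);
        uint64_t address_end   = address_start + PAGE_ALIGN(range_length);
        uint8_t  prot          = flags >> 1;    /* IR/IW become the unity-map prot bits */

        printf("unity map: %#llx - %#llx, prot %#x\n",
               (unsigned long long)address_start,
               (unsigned long long)address_end, (unsigned int)prot);
        return 0;
}
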
2679 end += table->length; in init_memory_definitions()
2684 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) in init_memory_definitions()
2687 p += m->length; in init_memory_definitions()
2699 struct dev_table_entry *dev_table = pci_seg->dev_table; in init_device_table_dma()
2704 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in init_device_table_dma()
2714 struct dev_table_entry *dev_table = pci_seg->dev_table; in uninit_device_table_dma()
2719 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in uninit_device_table_dma()
2734 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) in init_device_table()
2735 set_dte_bit(&pci_seg->dev_table[devid], DEV_ENTRY_IRQ_TBL_EN); in init_device_table()
2739 static void iommu_init_flags(struct amd_iommu *iommu) in iommu_init_flags() argument
2741 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? in iommu_init_flags()
2742 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : in iommu_init_flags()
2743 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); in iommu_init_flags()
2745 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? in iommu_init_flags()
2746 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : in iommu_init_flags()
2747 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); in iommu_init_flags()
2749 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? in iommu_init_flags()
2750 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : in iommu_init_flags()
2751 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); in iommu_init_flags()
2753 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? in iommu_init_flags()
2754 iommu_feature_enable(iommu, CONTROL_ISOC_EN) : in iommu_init_flags()
2755 iommu_feature_disable(iommu, CONTROL_ISOC_EN); in iommu_init_flags()
2758 * make IOMMU memory accesses cache coherent in iommu_init_flags()
2760 iommu_feature_enable(iommu, CONTROL_COHERENT_EN); in iommu_init_flags()
2763 iommu_feature_set(iommu, CTRL_INV_TO_1S, CTRL_INV_TO_MASK, CONTROL_INV_TIMEOUT); in iommu_init_flags()
2767 iommu_feature_enable(iommu, CONTROL_EPH_EN); in iommu_init_flags()
2770 static void iommu_apply_resume_quirks(struct amd_iommu *iommu) in iommu_apply_resume_quirks() argument
2774 struct pci_dev *pdev = iommu->root_pdev; in iommu_apply_resume_quirks()
2776 /* RD890 BIOSes may not have completely reconfigured the iommu */ in iommu_apply_resume_quirks()
2777 if (!is_rd890_iommu(iommu->dev) || !pdev) in iommu_apply_resume_quirks()
2781 * First, we need to ensure that the iommu is enabled. This is in iommu_apply_resume_quirks()
2789 /* Enable the iommu */ in iommu_apply_resume_quirks()
2793 /* Restore the iommu BAR */ in iommu_apply_resume_quirks()
2794 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
2795 iommu->stored_addr_lo); in iommu_apply_resume_quirks()
2796 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_apply_resume_quirks()
2797 iommu->stored_addr_hi); in iommu_apply_resume_quirks()
2802 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); in iommu_apply_resume_quirks()
2806 iommu_write_l2(iommu, i, iommu->stored_l2[i]); in iommu_apply_resume_quirks()
2808 /* Lock PCI setup registers */ in iommu_apply_resume_quirks()
2809 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
2810 iommu->stored_addr_lo | 1); in iommu_apply_resume_quirks()
2813 static void iommu_enable_ga(struct amd_iommu *iommu) in iommu_enable_ga() argument
2819 iommu_feature_enable(iommu, CONTROL_GA_EN); in iommu_enable_ga()
2820 iommu->irte_ops = &irte_128_ops; in iommu_enable_ga()
2823 iommu->irte_ops = &irte_32_ops; in iommu_enable_ga()
2829 static void iommu_disable_irtcachedis(struct amd_iommu *iommu) in iommu_disable_irtcachedis() argument
2831 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); in iommu_disable_irtcachedis()
2834 static void iommu_enable_irtcachedis(struct amd_iommu *iommu) in iommu_enable_irtcachedis() argument
2846 iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS); in iommu_enable_irtcachedis()
2847 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_enable_irtcachedis()
2850 iommu->irtcachedis_enabled = true; in iommu_enable_irtcachedis()
2851 pr_info("iommu%d (%#06x) : IRT cache is %s\n", in iommu_enable_irtcachedis()
2852 iommu->index, iommu->devid, in iommu_enable_irtcachedis()
2853 iommu->irtcachedis_enabled ? "disabled" : "enabled"); in iommu_enable_irtcachedis()
2856 static void iommu_enable_2k_int(struct amd_iommu *iommu) in iommu_enable_2k_int() argument
2861 iommu_feature_set(iommu, in iommu_enable_2k_int()
2867 static void early_enable_iommu(struct amd_iommu *iommu) in early_enable_iommu() argument
2869 iommu_disable(iommu); in early_enable_iommu()
2870 iommu_init_flags(iommu); in early_enable_iommu()
2871 iommu_set_device_table(iommu); in early_enable_iommu()
2872 iommu_enable_command_buffer(iommu); in early_enable_iommu()
2873 iommu_enable_event_buffer(iommu); in early_enable_iommu()
2874 iommu_set_exclusion_range(iommu); in early_enable_iommu()
2875 iommu_enable_gt(iommu); in early_enable_iommu()
2876 iommu_enable_ga(iommu); in early_enable_iommu()
2877 iommu_enable_xt(iommu); in early_enable_iommu()
2878 iommu_enable_irtcachedis(iommu); in early_enable_iommu()
2879 iommu_enable_2k_int(iommu); in early_enable_iommu()
2880 iommu_enable(iommu); in early_enable_iommu()
2881 amd_iommu_flush_all_caches(iommu); in early_enable_iommu()
2888 * Or if in kdump kernel and IOMMUs are all pre-enabled, try to reuse
2894 struct amd_iommu *iommu; in early_enable_iommus() local
2907 * previous kernel if SNP enabled as IOMMU commands will in early_enable_iommus()
2914 if (pci_seg->old_dev_tbl_cpy != NULL) { in early_enable_iommus()
2915 memunmap((void *)pci_seg->old_dev_tbl_cpy); in early_enable_iommus()
2916 pci_seg->old_dev_tbl_cpy = NULL; in early_enable_iommus()
2920 for_each_iommu(iommu) { in early_enable_iommus()
2921 clear_translation_pre_enabled(iommu); in early_enable_iommus()
2922 early_enable_iommu(iommu); in early_enable_iommus()
2928 iommu_free_pages(pci_seg->dev_table); in early_enable_iommus()
2929 pci_seg->dev_table = pci_seg->old_dev_tbl_cpy; in early_enable_iommus()
2932 for_each_iommu(iommu) { in early_enable_iommus()
2933 iommu_disable_command_buffer(iommu); in early_enable_iommus()
2934 iommu_disable_event_buffer(iommu); in early_enable_iommus()
2935 iommu_disable_irtcachedis(iommu); in early_enable_iommus()
2936 iommu_enable_command_buffer(iommu); in early_enable_iommus()
2937 iommu_enable_event_buffer(iommu); in early_enable_iommus()
2938 iommu_enable_ga(iommu); in early_enable_iommus()
2939 iommu_enable_xt(iommu); in early_enable_iommus()
2940 iommu_enable_irtcachedis(iommu); in early_enable_iommus()
2941 iommu_enable_2k_int(iommu); in early_enable_iommus()
2942 iommu_set_device_table(iommu); in early_enable_iommus()
2943 amd_iommu_flush_all_caches(iommu); in early_enable_iommus()
2950 struct amd_iommu *iommu; in enable_iommus_ppr() local
2955 for_each_iommu(iommu) in enable_iommus_ppr()
2956 amd_iommu_enable_ppr_log(iommu); in enable_iommus_ppr()
2963 struct amd_iommu *iommu; in enable_iommus_vapic() local
2965 for_each_iommu(iommu) { in enable_iommus_vapic()
2970 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in enable_iommus_vapic()
2974 iommu_feature_disable(iommu, CONTROL_GALOG_EN); in enable_iommus_vapic()
2975 iommu_feature_disable(iommu, CONTROL_GAINT_EN); in enable_iommus_vapic()
2982 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in enable_iommus_vapic()
3006 for_each_iommu(iommu) { in enable_iommus_vapic()
3007 if (iommu_init_ga_log(iommu) || in enable_iommus_vapic()
3008 iommu_ga_log_enable(iommu)) in enable_iommus_vapic()
3011 iommu_feature_enable(iommu, CONTROL_GAM_EN); in enable_iommus_vapic()
3013 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN); in enable_iommus_vapic()
3023 struct amd_iommu *iommu; in disable_iommus() local
3025 for_each_iommu(iommu) in disable_iommus()
3026 iommu_disable(iommu); in disable_iommus()
3041 struct amd_iommu *iommu; in amd_iommu_resume() local
3043 for_each_iommu(iommu) in amd_iommu_resume()
3044 iommu_apply_resume_quirks(iommu); in amd_iommu_resume()
3046 /* re-load the hardware */ in amd_iommu_resume()
3047 for_each_iommu(iommu) in amd_iommu_resume()
3048 early_enable_iommu(iommu); in amd_iommu_resume()
3091 * anymore - so be careful in check_ioapic_information()
3141 * This is the hardware init function for AMD IOMMU in the system.
3145 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
3150 * 2 pass) Find the highest PCI device id the driver has to handle.
3156 * in the system. It also maps the PCI devices in the
3175 return -ENODEV; in early_amd_iommu_init()
3179 return -ENODEV; in early_amd_iommu_init()
3183 return -EINVAL; in early_amd_iommu_init()
3188 ret = -EINVAL; in early_amd_iommu_init()
3233 pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n"); in early_amd_iommu_init()
3258 ret = -ENOMEM; in early_amd_iommu_init()
3281 struct amd_iommu *iommu; in amd_iommu_enable_interrupts() local
3284 for_each_iommu(iommu) { in amd_iommu_enable_interrupts()
3285 ret = iommu_init_irq(iommu); in amd_iommu_enable_interrupts()
3321 /* Don't use IOMMU if there is Stoney Ridge graphics */ in detect_ivrs()
3327 pr_info("Disable IOMMU on Stoney Ridge\n"); in detect_ivrs()
3333 /* Make sure ACS will be enabled during PCI probe */ in detect_ivrs()
3345 * SNP support requires that the IOMMU be enabled, and is in iommu_snp_enable()
3349 pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n"); in iommu_snp_enable()
3354 pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n"); in iommu_snp_enable()
3360 pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n"); in iommu_snp_enable()
3365 * Enable host SNP support once SNP support has been verified on the IOMMU. in iommu_snp_enable()
3372 pr_info("IOMMU SNP support enabled.\n"); in iommu_snp_enable()
3382 * AMD IOMMU Initialization State Machine
3394 ret = -ENODEV; in state_next()
3402 ret = -EINVAL; in state_next()
3433 ret = -EINVAL; in state_next()
3446 struct amd_iommu *iommu; in state_next() local
3452 for_each_iommu(iommu) in state_next()
3453 amd_iommu_flush_all_caches(iommu); in state_next()
3461 int ret = -EINVAL; in iommu_go_to_state()
3476 * in IOMMUs, then the system is in a half-baked state, but can limp in iommu_go_to_state()
3477 * along as all memory should be Hypervisor-Owned in the RMP. WARN, in iommu_go_to_state()
3500 return amd_iommu_irq_remap ? 0 : -ENODEV; in amd_iommu_prepare()
3529 /* We enable MSI later when PCI is initialized */ in amd_iommu_enable_faulting()
3535 * This is the core init function for AMD IOMMU hardware in the system.
3547 * We failed to initialize the AMD IOMMU - try fallback in amd_iommu_init()
3574 pr_notice("IOMMU not currently supported when SME is active\n"); in amd_iommu_sme_check()
3581 * Early detect code. This code runs at IOMMU detection time in the DMA
3602 x86_init.iommu.iommu_init = amd_iommu_init; in amd_iommu_detect()
3612 * Parsing functions for the AMD IOMMU specific kernel command line
3642 return -EINVAL; in parse_amd_iommu_options()
3646 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n"); in parse_amd_iommu_options()
3661 pr_info("Restricting V1 page-sizes to 4KiB"); in parse_amd_iommu_options()
3664 pr_info("Restricting V1 page-sizes to 4KiB/2MiB/1GiB"); in parse_amd_iommu_options()
3667 pr_notice("Unknown option - '%s'\n", str); in parse_amd_iommu_options()
3700 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", in parse_ivrs_ioapic()
3738 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", in parse_ivrs_hpet()
3843 /* CPU page table size should match IOMMU guest page table size */ in amd_iommu_pasid_supported()
3849 * Since DTE[Mode]=0 is prohibited on SNP-enabled system in amd_iommu_pasid_supported()
3859 struct amd_iommu *iommu; in get_amd_iommu() local
3861 for_each_iommu(iommu) in get_amd_iommu()
3863 return iommu; in get_amd_iommu()
3869 * IOMMU EFR Performance Counter support functionality. This code allows
3870 * access to the IOMMU PC functionality.
3876 struct amd_iommu *iommu = get_amd_iommu(idx); in amd_iommu_pc_get_max_banks() local
3878 if (iommu) in amd_iommu_pc_get_max_banks()
3879 return iommu->max_banks; in amd_iommu_pc_get_max_banks()
3891 struct amd_iommu *iommu = get_amd_iommu(idx); in amd_iommu_pc_get_max_counters() local
3893 if (iommu) in amd_iommu_pc_get_max_counters()
3894 return iommu->max_counters; in amd_iommu_pc_get_max_counters()
3899 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, in iommu_pc_get_set_reg() argument
3905 /* Make sure the IOMMU PC resource is available */ in iommu_pc_get_set_reg()
3907 return -ENODEV; in iommu_pc_get_set_reg()
3909 /* Check for valid iommu and pc register indexing */ in iommu_pc_get_set_reg()
3910 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) in iommu_pc_get_set_reg()
3911 return -ENODEV; in iommu_pc_get_set_reg()
3916 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | in iommu_pc_get_set_reg()
3917 (iommu->max_counters << 8) | 0x28); in iommu_pc_get_set_reg()
3920 return -EINVAL; in iommu_pc_get_set_reg()
3925 writel((u32)val, iommu->mmio_base + offset); in iommu_pc_get_set_reg()
3926 writel((val >> 32), iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg()
3928 *value = readl(iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg()
3930 *value |= readl(iommu->mmio_base + offset); in iommu_pc_get_set_reg()
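
The performance-counter MMIO space is indexed by bank, counter and function: the limit computed above is ((0x40 | max_banks) << 12) | (max_counters << 8) | 0x28, and the per-request offset (its line is not part of this excerpt) is presumably composed the same way from the requested indices. A sketch under that assumption:

#include <stdint.h>
#include <stdio.h>

/* Compose a perf-counter MMIO offset from bank/counter/function indices. */
static uint32_t pc_reg_offset(uint8_t bank, uint8_t cntr, uint8_t fxn)
{
        return ((0x40u | bank) << 12) | ((uint32_t)cntr << 8) | fxn;
}

int main(void)
{
        uint8_t max_banks = 0x04, max_counters = 0x04;  /* hypothetical limits */

        uint32_t max_offset_lim = ((0x40u | max_banks) << 12) |
                                  ((uint32_t)max_counters << 8) | 0x28;
        uint32_t offset = pc_reg_offset(1, 2, 0x08);

        printf("offset %#x, limit %#x -> %s\n", offset, max_offset_lim,
               offset <= max_offset_lim ? "in range" : "out of range");
        return 0;
}
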
3937 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) in amd_iommu_pc_get_reg() argument
3939 if (!iommu) in amd_iommu_pc_get_reg()
3940 return -EINVAL; in amd_iommu_pc_get_reg()
3942 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); in amd_iommu_pc_get_reg()
3945 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) in amd_iommu_pc_set_reg() argument
3947 if (!iommu) in amd_iommu_pc_set_reg()
3948 return -EINVAL; in amd_iommu_pc_set_reg()
3950 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); in amd_iommu_pc_set_reg()
3968 pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret); in iommu_page_make_shared()
3973 pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn); in iommu_page_make_shared()
3974 return -EINVAL; in iommu_page_make_shared()
3982 pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n", in iommu_page_make_shared()
4011 struct amd_iommu *iommu; in amd_iommu_snp_disable() local
4017 for_each_iommu(iommu) { in amd_iommu_snp_disable()
4018 ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE); in amd_iommu_snp_disable()
4022 ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE); in amd_iommu_snp_disable()
4026 ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE); in amd_iommu_snp_disable()