Lines Matching +full:iommu +full:- +full:v1
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
14 #include <linux/pci-ats.h>
19 #include <linux/dma-map-ops.h>
20 #include <linux/dma-direct.h>
22 #include <linux/iommu-helper.h>
24 #include <linux/amd-iommu.h>
31 #include <linux/io-pgtable.h>
38 #include <asm/iommu.h>
44 #include "../dma-iommu.h"
46 #include "../iommu-pages.h"
48 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
63 int amd_iommu_max_glx_val = -1;
66  * general struct to manage commands sent to an IOMMU
73 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
83 static void set_dte_entry(struct amd_iommu *iommu,
94 return (pdom && (pdom->pd_mode == PD_MODE_V2)); in pdom_is_v2_pgtbl_mode()
99 return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY); in pdom_is_in_pt_mode()
103 * We cannot support PASID w/ existing v1 page table in the same domain
119 return -ENODEV; in get_acpihid_device_id()
122 if (acpi_dev_hid_uid_match(adev, p->hid, in get_acpihid_device_id()
123 p->uid[0] ? p->uid : NULL)) { in get_acpihid_device_id()
126 return p->devid; in get_acpihid_device_id()
129 return -EINVAL; in get_acpihid_device_id()
144 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu) in get_dev_table() argument
147 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_dev_table()
150 dev_table = pci_seg->dev_table; in get_dev_table()
163 seg = pci_domain_nr(pdev->bus); in get_device_segment()
173 /* Writes the specific IOMMU for a device into the PCI segment rlookup table */
174 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid) in amd_iommu_set_rlookup_table() argument
176 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in amd_iommu_set_rlookup_table()
178 pci_seg->rlookup_table[devid] = iommu; in amd_iommu_set_rlookup_table()
186 if (pci_seg->id == seg) in __rlookup_amd_iommu()
187 return pci_seg->rlookup_table[devid]; in __rlookup_amd_iommu()
202 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid) in alloc_dev_data() argument
205 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in alloc_dev_data()
211 mutex_init(&dev_data->mutex); in alloc_dev_data()
212 dev_data->devid = devid; in alloc_dev_data()
213 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
215 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list); in alloc_dev_data()
219 static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid) in search_dev_data() argument
223 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in search_dev_data()
225 if (llist_empty(&pci_seg->dev_data_list)) in search_dev_data()
228 node = pci_seg->dev_data_list.first; in search_dev_data()
230 if (dev_data->devid == devid) in search_dev_data()
239 struct amd_iommu *iommu; in clone_alias() local
246 iommu = rlookup_amd_iommu(&pdev->dev); in clone_alias()
247 if (!iommu) in clone_alias()
250 amd_iommu_set_rlookup_table(iommu, alias); in clone_alias()
251 dev_table = get_dev_table(iommu); in clone_alias()
259 static void clone_aliases(struct amd_iommu *iommu, struct device *dev) in clone_aliases() argument
272 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL); in clone_aliases()
277 static void setup_aliases(struct amd_iommu *iommu, struct device *dev) in setup_aliases() argument
280 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in setup_aliases()
291 ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)]; in setup_aliases()
293 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) in setup_aliases()
296 clone_aliases(iommu, dev); in setup_aliases()
299 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid) in find_dev_data() argument
303 dev_data = search_dev_data(iommu, devid); in find_dev_data()
306 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
310 if (translation_pre_enabled(iommu)) in find_dev_data()
311 dev_data->defer_attach = true; in find_dev_data()
318  * Find or create an IOMMU group for an acpihid device.
330 if ((devid == p->devid) && p->group) in acpihid_device_group()
331 entry->group = p->group; in acpihid_device_group()
334 if (!entry->group) in acpihid_device_group()
335 entry->group = generic_device_group(dev); in acpihid_device_group()
337 iommu_group_ref_get(entry->group); in acpihid_device_group()
339 return entry->group; in acpihid_device_group()
344 return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP); in pdev_pasid_supported()
374 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_ats()
375 int ret = -EINVAL; in pdev_enable_cap_ats()
377 if (dev_data->ats_enabled) in pdev_enable_cap_ats()
381 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) { in pdev_enable_cap_ats()
384 dev_data->ats_enabled = 1; in pdev_enable_cap_ats()
385 dev_data->ats_qdep = pci_ats_queue_depth(pdev); in pdev_enable_cap_ats()
394 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_ats()
396 if (dev_data->ats_enabled) { in pdev_disable_cap_ats()
398 dev_data->ats_enabled = 0; in pdev_disable_cap_ats()
404 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_pri()
405 int ret = -EINVAL; in pdev_enable_cap_pri()
407 if (dev_data->pri_enabled) in pdev_enable_cap_pri()
410 if (!dev_data->ats_enabled) in pdev_enable_cap_pri()
413 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) { in pdev_enable_cap_pri()
419 dev_data->pri_enabled = 1; in pdev_enable_cap_pri()
420 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in pdev_enable_cap_pri()
431 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_pri()
433 if (dev_data->pri_enabled) { in pdev_disable_cap_pri()
435 dev_data->pri_enabled = 0; in pdev_disable_cap_pri()
441 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_pasid()
442 int ret = -EINVAL; in pdev_enable_cap_pasid()
444 if (dev_data->pasid_enabled) in pdev_enable_cap_pasid()
447 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) { in pdev_enable_cap_pasid()
448 /* Only allow access to user-accessible pages */ in pdev_enable_cap_pasid()
451 dev_data->pasid_enabled = 1; in pdev_enable_cap_pasid()
459 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_pasid()
461 if (dev_data->pasid_enabled) { in pdev_disable_cap_pasid()
463 dev_data->pasid_enabled = 0; in pdev_disable_cap_pasid()
488 struct amd_iommu *iommu; in check_device() local
499 iommu = rlookup_amd_iommu(dev); in check_device()
500 if (!iommu) in check_device()
504 pci_seg = iommu->pci_seg; in check_device()
505 if (devid > pci_seg->last_bdf) in check_device()
511 static int iommu_init_device(struct amd_iommu *iommu, struct device *dev) in iommu_init_device() argument
524 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
526 return -ENOMEM; in iommu_init_device()
528 dev_data->dev = dev; in iommu_init_device()
529 setup_aliases(iommu, dev); in iommu_init_device()
539 dev_data->flags = pdev_get_caps(to_pci_dev(dev)); in iommu_init_device()
547 static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev) in iommu_ignore_device() argument
549 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in iommu_ignore_device()
550 struct dev_table_entry *dev_table = get_dev_table(iommu); in iommu_ignore_device()
558 pci_seg->rlookup_table[devid] = NULL; in iommu_ignore_device()
561 setup_aliases(iommu, dev); in iommu_ignore_device()
571 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid) in dump_dte_entry() argument
574 struct dev_table_entry *dev_table = get_dev_table(iommu); in dump_dte_entry()
586 pr_err("CMD[%d]: %08x\n", i, cmd->data[i]); in dump_command()
589 static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event) in amd_iommu_report_rmp_hw_error() argument
601 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_hw_error()
604 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
607 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
613 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_hw_error()
621 static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event) in amd_iommu_report_rmp_fault() argument
634 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_fault()
637 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
640 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
646 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_fault()
660 static void amd_iommu_report_page_fault(struct amd_iommu *iommu, in amd_iommu_report_page_fault() argument
667 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_page_fault()
670 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
680 if (dev_data->domain == NULL) { in amd_iommu_report_page_fault()
683 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), in amd_iommu_report_page_fault()
688 if (!report_iommu_fault(&dev_data->domain->domain, in amd_iommu_report_page_fault()
689 &pdev->dev, address, in amd_iommu_report_page_fault()
696 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
702 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_page_fault()
711 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
713 struct device *dev = iommu->iommu.dev; in iommu_print_event()
739 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags); in iommu_print_event()
746 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
748 dump_dte_entry(iommu, devid); in iommu_print_event()
753 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
758 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
771 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
776 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
780 amd_iommu_report_rmp_fault(iommu, event); in iommu_print_event()
783 amd_iommu_report_rmp_hw_error(iommu, event); in iommu_print_event()
789 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
807 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
811 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
812 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
815 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
817 /* Update head pointer of hardware ring-buffer */ in iommu_poll_events()
819 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
835 static void iommu_poll_ga_log(struct amd_iommu *iommu) in iommu_poll_ga_log() argument
839 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
842 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
843 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
849 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
851 /* Avoid memcpy function-call overhead */ in iommu_poll_ga_log()
854 /* Update head pointer of hardware ring-buffer */ in iommu_poll_ga_log()
856 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
878 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) in amd_iommu_set_pci_msi_domain() argument
884 dev_set_msi_domain(dev, iommu->ir_domain); in amd_iommu_set_pci_msi_domain()
889 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } in amd_iommu_set_pci_msi_domain() argument
897 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_handle_irq() local
898 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
903 writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
906 pr_devel("Processing IOMMU (ivhd%d) %s Log\n", in amd_iommu_handle_irq()
907 iommu->index, evt_type); in amd_iommu_handle_irq()
908 int_handler(iommu); in amd_iommu_handle_irq()
912 overflow_handler(iommu); in amd_iommu_handle_irq()
916 * When re-enabling interrupt (by writing 1 in amd_iommu_handle_irq()
922 * Workaround: The IOMMU driver should read back the in amd_iommu_handle_irq()
925 * again and re-clear the bits in amd_iommu_handle_irq()
927 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
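The matched comment above describes the workaround only in words; a minimal sketch of the clear-and-readback loop it implies (the interrupt mask name is illustrative, not the driver's):

	/* Sketch only: keep clearing the write-1-to-clear status bits and
	 * re-reading the register, so an event that arrives between the read
	 * and the write is still noticed even if no new interrupt fires.
	 */
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	while (status & INT_HANDLED_MASK) {	/* illustrative mask */
		/* ... dispatch the event/PPR/GA log handlers for 'status' ... */
		writel(status & INT_HANDLED_MASK,
		       iommu->mmio_base + MMIO_STATUS_OFFSET);
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	}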
976 * IOMMU command queuing functions
980 static int wait_on_sem(struct amd_iommu *iommu, u64 data) in wait_on_sem() argument
984 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
990 pr_alert("Completion-Wait loop timed out\n"); in wait_on_sem()
991 return -EIO; in wait_on_sem()
997 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
1004 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
1005 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
1009 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
1011 /* Tell the IOMMU about it */ in copy_cmd_to_buffer()
1012 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
1016 struct amd_iommu *iommu, in build_completion_wait() argument
1019 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
1022 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK; in build_completion_wait()
1023 cmd->data[1] = upper_32_bits(paddr); in build_completion_wait()
1024 cmd->data[2] = lower_32_bits(data); in build_completion_wait()
1025 cmd->data[3] = upper_32_bits(data); in build_completion_wait()
1032 cmd->data[0] = devid; in build_inv_dte()
1049 end = address + size - 1; in build_inv_address()
1055 msb_diff = fls64(end ^ address) - 1; in build_inv_address()
1065 * The msb-bit must be clear on the address. Just set all the in build_inv_address()
1068 address |= (1ull << msb_diff) - 1; in build_inv_address()
1074 /* Set the size bit - we flush more than one 4kb page */ in build_inv_address()
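A worked example of the range encoding sketched by the fragments above (numbers are illustrative; the final step that clears bits 11:0 is not among the matched lines):

	/*
	 * address = 0x10000, size = 0x3000:
	 *   end          = 0x10000 + 0x3000 - 1 = 0x12fff
	 *   end ^ address                        = 0x02fff
	 *   msb_diff     = fls64(0x2fff) - 1     = 13
	 *   address     |= (1ull << 13) - 1      (low bits below bit 13 set)
	 * After page alignment, bit 12 stays set and bit 13 is clear; with the
	 * size ("S") bit set, the position of the lowest clear address bit at
	 * or above bit 12 encodes the range, so the IOMMU invalidates the
	 * naturally aligned 16 KiB block covering 0x10000-0x12fff.
	 */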
1086 cmd->data[1] |= domid; in build_inv_iommu_pages()
1087 cmd->data[2] = lower_32_bits(inv_address); in build_inv_iommu_pages()
1088 cmd->data[3] = upper_32_bits(inv_address); in build_inv_iommu_pages()
1089 /* PDE bit - we want to flush everything, not only the PTEs */ in build_inv_iommu_pages()
1090 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; in build_inv_iommu_pages()
1092 cmd->data[0] |= pasid; in build_inv_iommu_pages()
1093 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; in build_inv_iommu_pages()
1106 cmd->data[0] = devid; in build_inv_iotlb_pages()
1107 cmd->data[0] |= (qdep & 0xff) << 24; in build_inv_iotlb_pages()
1108 cmd->data[1] = devid; in build_inv_iotlb_pages()
1109 cmd->data[2] = lower_32_bits(inv_address); in build_inv_iotlb_pages()
1110 cmd->data[3] = upper_32_bits(inv_address); in build_inv_iotlb_pages()
1112 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16; in build_inv_iotlb_pages()
1113 cmd->data[1] |= (pasid & 0xff) << 16; in build_inv_iotlb_pages()
1114 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; in build_inv_iotlb_pages()
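For the PASID packing in the two fragments just above, a concrete (hypothetical) value may help:

	/*
	 * Illustrative: for pasid = 0x1234,
	 *   (pasid >> 8) & 0xff = 0x12  ->  bits 23:16 of data[0]
	 *    pasid       & 0xff = 0x34  ->  bits 23:16 of data[1]
	 * so the 16-bit PASID is split across the two command words, and
	 * CMD_INV_IOMMU_PAGES_GN_MASK marks the request as a PASID (guest)
	 * invalidation.
	 */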
1125 cmd->data[0] = devid; in build_complete_ppr()
1127 cmd->data[1] = pasid; in build_complete_ppr()
1128 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK; in build_complete_ppr()
1130 cmd->data[3] = tag & 0x1ff; in build_complete_ppr()
1131 cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT; in build_complete_ppr()
1145 cmd->data[0] = devid; in build_inv_irt()
1153 static int __iommu_queue_command_sync(struct amd_iommu *iommu, in __iommu_queue_command_sync() argument
1160 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1162 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1169 return -EIO; in __iommu_queue_command_sync()
1176 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1182 copy_cmd_to_buffer(iommu, cmd); in __iommu_queue_command_sync()
1185 iommu->need_sync = sync; in __iommu_queue_command_sync()
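The modular arithmetic in the queueing fragments above is easiest to see with concrete numbers (buffer size and command size are assumptions, not taken from the matched lines):

	/*
	 * Assume CMD_BUFFER_SIZE = 0x2000 and a 16-byte command:
	 *   cmd_buf_tail = 0x1fe0
	 *   next_tail    = (0x1fe0 + 0x10)   % 0x2000 = 0x1ff0
	 *   cmd_buf_head = 0x0020
	 *   left         = (0x0020 - 0x1ff0) % 0x2000 = 0x0030
	 * The power-of-two size makes the unsigned subtraction wrap correctly
	 * even when the tail is ahead of the head; when 'left' runs low, the
	 * cached head is refreshed from the hardware head register (the readl
	 * in the fragment above) and the space check is retried.
	 */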
1190 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
1197 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1198 ret = __iommu_queue_command_sync(iommu, cmd, sync); in iommu_queue_command_sync()
1199 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1204 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
1206 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
1211 * buffer of an IOMMU
1213 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
1220 if (!iommu->need_sync) in iommu_completion_wait()
1223 data = atomic64_inc_return(&iommu->cmd_sem_val); in iommu_completion_wait()
1224 build_completion_wait(&cmd, iommu, data); in iommu_completion_wait()
1226 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1228 ret = __iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
1232 ret = wait_on_sem(iommu, data); in iommu_completion_wait()
1235 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
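Read together, the completion-wait fragments above form a simple fence; a condensed sketch of that flow (locking and error handling omitted):

	/* Sketch only, pieced together from the lines above: queue a
	 * COMPLETION_WAIT that stores a unique token to iommu->cmd_sem,
	 * then spin until the token appears.
	 */
	u64 data = atomic64_inc_return(&iommu->cmd_sem_val);
	build_completion_wait(&cmd, iommu, data);
	__iommu_queue_command_sync(iommu, &cmd, false);
	wait_on_sem(iommu, data);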
1245 lockdep_assert_held(&domain->lock); in domain_flush_complete()
1248 * Devices of this domain are behind this IOMMU in domain_flush_complete()
1251 xa_for_each(&domain->iommu_array, i, pdom_iommu_info) in domain_flush_complete()
1252 iommu_completion_wait(pdom_iommu_info->iommu); in domain_flush_complete()
1255 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1261 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
1264 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) in amd_iommu_flush_dte_all() argument
1267 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_dte_all()
1270 iommu_flush_dte(iommu, devid); in amd_iommu_flush_dte_all()
1272 iommu_completion_wait(iommu); in amd_iommu_flush_dte_all()
1279 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) in amd_iommu_flush_tlb_all() argument
1282 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_tlb_all()
1288 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_all()
1291 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_all()
1294 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) in amd_iommu_flush_tlb_domid() argument
1300 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_domid()
1302 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_domid()
1305 static void amd_iommu_flush_all(struct amd_iommu *iommu) in amd_iommu_flush_all() argument
1311 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_all()
1312 iommu_completion_wait(iommu); in amd_iommu_flush_all()
1315 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1321 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1324 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) in amd_iommu_flush_irt_all() argument
1327 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_irt_all()
1329 if (iommu->irtcachedis_enabled) in amd_iommu_flush_irt_all()
1333 iommu_flush_irt(iommu, devid); in amd_iommu_flush_irt_all()
1335 iommu_completion_wait(iommu); in amd_iommu_flush_irt_all()
1338 void amd_iommu_flush_all_caches(struct amd_iommu *iommu) in amd_iommu_flush_all_caches() argument
1341 amd_iommu_flush_all(iommu); in amd_iommu_flush_all_caches()
1343 amd_iommu_flush_dte_all(iommu); in amd_iommu_flush_all_caches()
1344 amd_iommu_flush_irt_all(iommu); in amd_iommu_flush_all_caches()
1345 amd_iommu_flush_tlb_all(iommu); in amd_iommu_flush_all_caches()
1350 * Command send function for flushing on-device TLB
1355 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_iotlb() local
1357 int qdep = dev_data->ats_qdep; in device_flush_iotlb()
1359 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, in device_flush_iotlb()
1362 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1367 struct amd_iommu *iommu = data; in device_flush_dte_alias() local
1369 return iommu_flush_dte(iommu, alias); in device_flush_dte_alias()
1377 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_dte() local
1383 if (dev_is_pci(dev_data->dev)) in device_flush_dte()
1384 pdev = to_pci_dev(dev_data->dev); in device_flush_dte()
1388 device_flush_dte_alias, iommu); in device_flush_dte()
1390 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1394 pci_seg = iommu->pci_seg; in device_flush_dte()
1395 alias = pci_seg->alias_table[dev_data->devid]; in device_flush_dte()
1396 if (alias != dev_data->devid) { in device_flush_dte()
1397 ret = iommu_flush_dte(iommu, alias); in device_flush_dte()
1402 if (dev_data->ats_enabled) { in device_flush_dte()
1418 lockdep_assert_held(&pdom->lock); in domain_flush_pages_v2()
1419 list_for_each_entry(dev_data, &pdom->dev_list, list) { in domain_flush_pages_v2()
1420 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in domain_flush_pages_v2() local
1421 u16 domid = dev_data->gcr3_info.domid; in domain_flush_pages_v2()
1426 ret |= iommu_queue_command(iommu, &cmd); in domain_flush_pages_v2()
1440 lockdep_assert_held(&pdom->lock); in domain_flush_pages_v1()
1443 pdom->id, IOMMU_NO_PASID, false); in domain_flush_pages_v1()
1445 xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) { in domain_flush_pages_v1()
1447 * Devices of this domain are behind this IOMMU in domain_flush_pages_v1()
1450 ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd); in domain_flush_pages_v1()
1468 lockdep_assert_held(&domain->lock); in __domain_flush_pages()
1477 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1479 if (!dev_data->ats_enabled) in __domain_flush_pages()
1491 lockdep_assert_held(&domain->lock); in amd_iommu_domain_flush_pages()
1496 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */ in amd_iommu_domain_flush_pages()
1519 * size is always non-zero, but address might be zero, causing in amd_iommu_domain_flush_pages()
1522 * of the address on x86-32, cast to long when doing the check. in amd_iommu_domain_flush_pages()
1533 size -= flush_size; in amd_iommu_domain_flush_pages()
1536 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */ in amd_iommu_domain_flush_pages()
1540 /* Flush the whole IO/TLB for a given protection domain - including PDE */
1551 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in amd_iommu_dev_flush_pasid_pages() local
1554 dev_data->gcr3_info.domid, pasid, true); in amd_iommu_dev_flush_pasid_pages()
1555 iommu_queue_command(iommu, &cmd); in amd_iommu_dev_flush_pasid_pages()
1557 if (dev_data->ats_enabled) in amd_iommu_dev_flush_pasid_pages()
1560 iommu_completion_wait(iommu); in amd_iommu_dev_flush_pasid_pages()
1577 spin_lock_irqsave(&domain->lock, flags); in domain_flush_np_cache()
1579 spin_unlock_irqrestore(&domain->lock, flags); in domain_flush_np_cache()
1591 lockdep_assert_held(&domain->lock); in amd_iommu_update_and_flush_device_table()
1593 list_for_each_entry(dev_data, &domain->dev_list, list) { in amd_iommu_update_and_flush_device_table()
1594 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in amd_iommu_update_and_flush_device_table() local
1596 set_dte_entry(iommu, dev_data); in amd_iommu_update_and_flush_device_table()
1597 clone_aliases(iommu, dev_data->dev); in amd_iommu_update_and_flush_device_table()
1600 list_for_each_entry(dev_data, &domain->dev_list, list) in amd_iommu_update_and_flush_device_table()
1618 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
1622 iommu = get_amd_iommu_from_dev(dev); in amd_iommu_complete_ppr()
1624 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
1625 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
1627 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
1633 * allocated for every IOMMU as the default domain. If device isolation
1642 return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC); in pdom_id_alloc()
1682 if (gcr3_info->glx == 2) in free_gcr3_table()
1683 free_gcr3_tbl_level2(gcr3_info->gcr3_tbl); in free_gcr3_table()
1684 else if (gcr3_info->glx == 1) in free_gcr3_table()
1685 free_gcr3_tbl_level1(gcr3_info->gcr3_tbl); in free_gcr3_table()
1687 WARN_ON_ONCE(gcr3_info->glx != 0); in free_gcr3_table()
1689 gcr3_info->glx = 0; in free_gcr3_table()
1692 pdom_id_free(gcr3_info->domid); in free_gcr3_table()
1694 iommu_free_page(gcr3_info->gcr3_tbl); in free_gcr3_table()
1695 gcr3_info->gcr3_tbl = NULL; in free_gcr3_table()
1699 * Number of GCR3 table levels required. Level must be 4-Kbyte
1706 if (pasids == -1) in get_gcr3_levels()
1711 return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels; in get_gcr3_levels()
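A worked example for the level computation above (the PASID width is illustrative):

	/*
	 * For 2^16 PASIDs, 'levels' (the bit order of the PASID count,
	 * computed on a line not matched here) is 16, so
	 *   DIV_ROUND_UP(16, 9) - 1 = 2 - 1 = 1
	 * i.e. glx = 1, a two-level GCR3 table: each 4-Kbyte level resolves
	 * nine PASID bits (512 eight-byte entries).
	 */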
1715 struct amd_iommu *iommu, int pasids) in setup_gcr3_table() argument
1718 int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE; in setup_gcr3_table()
1722 return -EINVAL; in setup_gcr3_table()
1724 if (gcr3_info->gcr3_tbl) in setup_gcr3_table()
1725 return -EBUSY; in setup_gcr3_table()
1730 return -ENOSPC; in setup_gcr3_table()
1731 gcr3_info->domid = domid; in setup_gcr3_table()
1733 gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC); in setup_gcr3_table()
1734 if (gcr3_info->gcr3_tbl == NULL) { in setup_gcr3_table()
1736 return -ENOMEM; in setup_gcr3_table()
1739 gcr3_info->glx = levels; in setup_gcr3_table()
1749 u64 *root = gcr3_info->gcr3_tbl; in __get_gcr3_pte()
1750 int level = gcr3_info->glx; in __get_gcr3_pte()
1773 level -= 1; in __get_gcr3_pte()
1782 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in update_gcr3()
1787 return -ENOMEM; in update_gcr3()
1801 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in amd_iommu_set_gcr3()
1804 iommu_group_mutex_assert(dev_data->dev); in amd_iommu_set_gcr3()
1810 gcr3_info->pasid_cnt++; in amd_iommu_set_gcr3()
1816 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in amd_iommu_clear_gcr3()
1819 iommu_group_mutex_assert(dev_data->dev); in amd_iommu_clear_gcr3()
1825 gcr3_info->pasid_cnt--; in amd_iommu_clear_gcr3()
1829 static void set_dte_entry(struct amd_iommu *iommu, in set_dte_entry() argument
1835 u16 devid = dev_data->devid; in set_dte_entry()
1837 struct protection_domain *domain = dev_data->domain; in set_dte_entry()
1838 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dte_entry()
1839 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in set_dte_entry()
1841 if (gcr3_info && gcr3_info->gcr3_tbl) in set_dte_entry()
1842 domid = dev_data->gcr3_info.domid; in set_dte_entry()
1844 domid = domain->id; in set_dte_entry()
1846 if (domain->iop.mode != PAGE_MODE_NONE) in set_dte_entry()
1847 pte_root = iommu_virt_to_phys(domain->iop.root); in set_dte_entry()
1849 pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
1855 	 * When SNP is enabled, only set TV bit when IOMMU in set_dte_entry()
1863 if (dev_data->ats_enabled) in set_dte_entry()
1866 if (dev_data->ppr) in set_dte_entry()
1869 if (domain->dirty_tracking) in set_dte_entry()
1872 if (gcr3_info && gcr3_info->gcr3_tbl) { in set_dte_entry()
1873 u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl); in set_dte_entry()
1874 u64 glx = gcr3_info->glx; in set_dte_entry()
1916 * the previous kernel--if so, it needs to flush the translation cache in set_dte_entry()
1920 amd_iommu_flush_tlb_domid(iommu, old_domid); in set_dte_entry()
1924 static void clear_dte_entry(struct amd_iommu *iommu, u16 devid) in clear_dte_entry() argument
1926 struct dev_table_entry *dev_table = get_dev_table(iommu); in clear_dte_entry()
1936 amd_iommu_apply_erratum_63(iommu, devid); in clear_dte_entry()
1942 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in dev_update_dte() local
1945 set_dte_entry(iommu, dev_data); in dev_update_dte()
1947 clear_dte_entry(iommu, dev_data->devid); in dev_update_dte()
1949 clone_aliases(iommu, dev_data->dev); in dev_update_dte()
1951 iommu_completion_wait(iommu); in dev_update_dte()
1961 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in init_gcr3_table() local
1962 int max_pasids = dev_data->max_pasids; in init_gcr3_table()
1974 * supported by the device/IOMMU. in init_gcr3_table()
1976 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, in init_gcr3_table()
1985 ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true); in init_gcr3_table()
1987 free_gcr3_table(&dev_data->gcr3_info); in init_gcr3_table()
1995 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in destroy_gcr3_table()
2000 if (gcr3_info->gcr3_tbl == NULL) in destroy_gcr3_table()
2006 static int pdom_attach_iommu(struct amd_iommu *iommu, in pdom_attach_iommu() argument
2010 struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg; in pdom_attach_iommu()
2014 spin_lock_irqsave(&pdom->lock, flags); in pdom_attach_iommu()
2016 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); in pdom_attach_iommu()
2018 pdom_iommu_info->refcnt++; in pdom_attach_iommu()
2024 ret = -ENOMEM; in pdom_attach_iommu()
2028 pdom_iommu_info->iommu = iommu; in pdom_attach_iommu()
2029 pdom_iommu_info->refcnt = 1; in pdom_attach_iommu()
2031 curr = xa_cmpxchg(&pdom->iommu_array, iommu->index, in pdom_attach_iommu()
2035 ret = -ENOSPC; in pdom_attach_iommu()
2040 if (cfg->amd.nid == NUMA_NO_NODE) in pdom_attach_iommu()
2041 cfg->amd.nid = dev_to_node(&iommu->dev->dev); in pdom_attach_iommu()
2044 spin_unlock_irqrestore(&pdom->lock, flags); in pdom_attach_iommu()
2048 static void pdom_detach_iommu(struct amd_iommu *iommu, in pdom_detach_iommu() argument
2054 spin_lock_irqsave(&pdom->lock, flags); in pdom_detach_iommu()
2056 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); in pdom_detach_iommu()
2058 spin_unlock_irqrestore(&pdom->lock, flags); in pdom_detach_iommu()
2062 pdom_iommu_info->refcnt--; in pdom_detach_iommu()
2063 if (pdom_iommu_info->refcnt == 0) { in pdom_detach_iommu()
2064 xa_erase(&pdom->iommu_array, iommu->index); in pdom_detach_iommu()
2068 spin_unlock_irqrestore(&pdom->lock, flags); in pdom_detach_iommu()
2079 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in attach_device() local
2084 mutex_lock(&dev_data->mutex); in attach_device()
2086 if (dev_data->domain != NULL) { in attach_device()
2087 ret = -EBUSY; in attach_device()
2092 ret = pdom_attach_iommu(iommu, domain); in attach_device()
2100 pdom_detach_iommu(iommu, domain); in attach_device()
2105 pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; in attach_device()
2114 if (amd_iommu_iopf_add_device(iommu, dev_data)) in attach_device()
2121 dev_data->domain = domain; in attach_device()
2122 spin_lock_irqsave(&domain->lock, flags); in attach_device()
2123 list_add(&dev_data->list, &domain->dev_list); in attach_device()
2124 spin_unlock_irqrestore(&domain->lock, flags); in attach_device()
2130 mutex_unlock(&dev_data->mutex); in attach_device()
2141 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in detach_device() local
2142 struct protection_domain *domain = dev_data->domain; in detach_device()
2145 mutex_lock(&dev_data->mutex); in detach_device()
2153 if (WARN_ON(!dev_data->domain)) in detach_device()
2157 if (dev_data->ppr) { in detach_device()
2159 amd_iommu_iopf_remove_device(iommu, dev_data); in detach_device()
2169 spin_lock_irqsave(&domain->lock, flags); in detach_device()
2171 list_del(&dev_data->list); in detach_device()
2172 spin_unlock_irqrestore(&domain->lock, flags); in detach_device()
2179 dev_data->domain = NULL; in detach_device()
2181 /* decrease reference counters - needs to happen after the flushes */ in detach_device()
2182 pdom_detach_iommu(iommu, domain); in detach_device()
2185 mutex_unlock(&dev_data->mutex); in detach_device()
2191 struct amd_iommu *iommu; in amd_iommu_probe_device() local
2196 return ERR_PTR(-ENODEV); in amd_iommu_probe_device()
2198 iommu = rlookup_amd_iommu(dev); in amd_iommu_probe_device()
2199 if (!iommu) in amd_iommu_probe_device()
2200 return ERR_PTR(-ENODEV); in amd_iommu_probe_device()
2203 if (!iommu->iommu.ops) in amd_iommu_probe_device()
2204 return ERR_PTR(-ENODEV); in amd_iommu_probe_device()
2207 return &iommu->iommu; in amd_iommu_probe_device()
2209 ret = iommu_init_device(iommu, dev); in amd_iommu_probe_device()
2211 dev_err(dev, "Failed to initialize - trying to proceed anyway\n"); in amd_iommu_probe_device()
2213 iommu_ignore_device(iommu, dev); in amd_iommu_probe_device()
2217 amd_iommu_set_pci_msi_domain(dev, iommu); in amd_iommu_probe_device()
2218 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
2221 	 * If IOMMU and device support PASID then it will contain max in amd_iommu_probe_device()
2227 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids, in amd_iommu_probe_device()
2232 iommu_completion_wait(iommu); in amd_iommu_probe_device()
2244 WARN_ON(dev_data->domain); in amd_iommu_release_device()
2248 * device is re-plugged - not doing so would introduce a ton of races. in amd_iommu_release_device()
2262 * The following functions belong to the exported interface of AMD IOMMU
2264 * This interface allows access to lower level functions of the IOMMU
2272 WARN_ON(!list_empty(&domain->dev_list)); in protection_domain_free()
2273 if (domain->domain.type & __IOMMU_DOMAIN_PAGING) in protection_domain_free()
2274 free_io_pgtable_ops(&domain->iop.pgtbl.ops); in protection_domain_free()
2275 pdom_id_free(domain->id); in protection_domain_free()
2281 spin_lock_init(&domain->lock); in protection_domain_init()
2282 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_init()
2283 INIT_LIST_HEAD(&domain->dev_data_list); in protection_domain_init()
2284 xa_init(&domain->iommu_array); in protection_domain_init()
2285 domain->iop.pgtbl.cfg.amd.nid = nid; in protection_domain_init()
2302 domain->id = domid; in protection_domain_alloc()
2320 domain->pd_mode = PD_MODE_V1; in pdom_setup_pgtable()
2323 domain->pd_mode = PD_MODE_V2; in pdom_setup_pgtable()
2326 return -EINVAL; in pdom_setup_pgtable()
2330 alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain); in pdom_setup_pgtable()
2332 return -ENOMEM; in pdom_setup_pgtable()
2343 return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); in dma_max_address()
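A quick reading of the aperture math on the line above, assuming the usual PM_LEVEL_SHIFT(x) = 12 + 9 * x layout of 9-bit levels over 4-KiB pages:

	/*
	 * Illustrative: with a 4-level v2 page table, amd_iommu_gpt_level = 4,
	 * PM_LEVEL_SHIFT(4) = 12 + 36 = 48, so the aperture end is 2^48 - 1;
	 * a 5-level table extends this to 2^57 - 1.
	 */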
2346 static bool amd_iommu_hd_support(struct amd_iommu *iommu) in amd_iommu_hd_support() argument
2348 return iommu && (iommu->features & FEATURE_HDSUP); in amd_iommu_hd_support()
2357 struct amd_iommu *iommu = NULL; in do_iommu_domain_alloc() local
2361 iommu = get_amd_iommu_from_dev(dev); in do_iommu_domain_alloc()
2364 * Since DTE[Mode]=0 is prohibited on SNP-enabled system, in do_iommu_domain_alloc()
2368 return ERR_PTR(-EINVAL); in do_iommu_domain_alloc()
2373 return ERR_PTR(-ENOMEM); in do_iommu_domain_alloc()
2377 pdom_id_free(domain->id); in do_iommu_domain_alloc()
2382 domain->domain.geometry.aperture_start = 0; in do_iommu_domain_alloc()
2383 domain->domain.geometry.aperture_end = dma_max_address(pgtable); in do_iommu_domain_alloc()
2384 domain->domain.geometry.force_aperture = true; in do_iommu_domain_alloc()
2385 domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap; in do_iommu_domain_alloc()
2387 if (iommu) { in do_iommu_domain_alloc()
2388 domain->domain.type = type; in do_iommu_domain_alloc()
2389 domain->domain.ops = iommu->iommu.ops->default_domain_ops; in do_iommu_domain_alloc()
2392 domain->domain.dirty_ops = &amd_dirty_ops; in do_iommu_domain_alloc()
2395 return &domain->domain; in do_iommu_domain_alloc()
2404 * Force IOMMU v1 page table when allocating in amd_iommu_domain_alloc()
2405 * domain for pass-through devices. in amd_iommu_domain_alloc()
2423 struct amd_iommu *iommu = NULL; in amd_iommu_domain_alloc_paging_flags() local
2428 iommu = get_amd_iommu_from_dev(dev); in amd_iommu_domain_alloc_paging_flags()
2431 return ERR_PTR(-EOPNOTSUPP); in amd_iommu_domain_alloc_paging_flags()
2433 /* Allocate domain with v2 page table if IOMMU supports PASID. */ in amd_iommu_domain_alloc_paging_flags()
2436 return ERR_PTR(-EOPNOTSUPP); in amd_iommu_domain_alloc_paging_flags()
2441 /* Allocate domain with v1 page table for dirty tracking */ in amd_iommu_domain_alloc_paging_flags()
2443 if (iommu && amd_iommu_hd_support(iommu)) { in amd_iommu_domain_alloc_paging_flags()
2448 return ERR_PTR(-EOPNOTSUPP); in amd_iommu_domain_alloc_paging_flags()
2467 if (dev_data->domain) in blocked_domain_attach_device()
2471 mutex_lock(&dev_data->mutex); in blocked_domain_attach_device()
2473 mutex_unlock(&dev_data->mutex); in blocked_domain_attach_device()
2495 domain->type = IOMMU_DOMAIN_IDENTITY; in amd_iommu_init_identity_domain()
2496 domain->ops = &identity_domain_ops; in amd_iommu_init_identity_domain()
2497 domain->owner = &amd_iommu_ops; in amd_iommu_init_identity_domain()
2504 /* Same as blocked domain except it supports only ops->attach_dev() */
2517 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); in amd_iommu_attach_device() local
2524 if (dev_data->domain == domain) in amd_iommu_attach_device()
2527 dev_data->defer_attach = false; in amd_iommu_attach_device()
2530 * Restrict to devices with compatible IOMMU hardware support in amd_iommu_attach_device()
2533 if (dom->dirty_ops && !amd_iommu_hd_support(iommu)) in amd_iommu_attach_device()
2534 return -EINVAL; in amd_iommu_attach_device()
2536 if (dev_data->domain) in amd_iommu_attach_device()
2543 if (dom->type == IOMMU_DOMAIN_UNMANAGED) in amd_iommu_attach_device()
2544 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2546 dev_data->use_vapic = 0; in amd_iommu_attach_device()
2557 struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; in amd_iommu_iotlb_sync_map()
2559 if (ops->map_pages) in amd_iommu_iotlb_sync_map()
2569 struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; in amd_iommu_map_pages()
2571 int ret = -EINVAL; in amd_iommu_map_pages()
2573 if ((domain->pd_mode == PD_MODE_V1) && in amd_iommu_map_pages()
2574 (domain->iop.mode == PAGE_MODE_NONE)) in amd_iommu_map_pages()
2575 return -EINVAL; in amd_iommu_map_pages()
2582 if (ops->map_pages) { in amd_iommu_map_pages()
2583 ret = ops->map_pages(ops, iova, paddr, pgsize, in amd_iommu_map_pages()
2595 * AMD's IOMMU can flush as many pages as necessary in a single flush. in amd_iommu_iotlb_gather_add_page()
2597 * to whether "non-present cache" is on, it is probably best to prefer in amd_iommu_iotlb_gather_add_page()
2600 * hypervisor needs to synchronize the host IOMMU PTEs with those of in amd_iommu_iotlb_gather_add_page()
2601 * the guest, and the trade-off is different: unnecessary TLB flushes in amd_iommu_iotlb_gather_add_page()
2616 struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; in amd_iommu_unmap_pages()
2619 if ((domain->pd_mode == PD_MODE_V1) && in amd_iommu_unmap_pages()
2620 (domain->iop.mode == PAGE_MODE_NONE)) in amd_iommu_unmap_pages()
2623 r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0; in amd_iommu_unmap_pages()
2635 struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; in amd_iommu_iova_to_phys()
2637 return ops->iova_to_phys(ops, iova); in amd_iommu_iova_to_phys()
2654 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); in amd_iommu_capable() local
2656 return amd_iommu_hd_support(iommu); in amd_iommu_capable()
2672 struct amd_iommu *iommu; in amd_iommu_set_dirty_tracking() local
2676 spin_lock_irqsave(&pdomain->lock, flags); in amd_iommu_set_dirty_tracking()
2677 if (!(pdomain->dirty_tracking ^ enable)) { in amd_iommu_set_dirty_tracking()
2678 spin_unlock_irqrestore(&pdomain->lock, flags); in amd_iommu_set_dirty_tracking()
2682 list_for_each_entry(dev_data, &pdomain->dev_list, list) { in amd_iommu_set_dirty_tracking()
2683 iommu = get_amd_iommu_from_dev_data(dev_data); in amd_iommu_set_dirty_tracking()
2685 dev_table = get_dev_table(iommu); in amd_iommu_set_dirty_tracking()
2686 pte_root = dev_table[dev_data->devid].data[0]; in amd_iommu_set_dirty_tracking()
2692 dev_table[dev_data->devid].data[0] = pte_root; in amd_iommu_set_dirty_tracking()
2701 pdomain->dirty_tracking = enable; in amd_iommu_set_dirty_tracking()
2702 spin_unlock_irqrestore(&pdomain->lock, flags); in amd_iommu_set_dirty_tracking()
2713 struct io_pgtable_ops *ops = &pdomain->iop.pgtbl.ops; in amd_iommu_read_and_clear_dirty()
2716 if (!ops || !ops->read_and_clear_dirty) in amd_iommu_read_and_clear_dirty()
2717 return -EOPNOTSUPP; in amd_iommu_read_and_clear_dirty()
2719 spin_lock_irqsave(&pdomain->lock, lflags); in amd_iommu_read_and_clear_dirty()
2720 if (!pdomain->dirty_tracking && dirty->bitmap) { in amd_iommu_read_and_clear_dirty()
2721 spin_unlock_irqrestore(&pdomain->lock, lflags); in amd_iommu_read_and_clear_dirty()
2722 return -EINVAL; in amd_iommu_read_and_clear_dirty()
2724 spin_unlock_irqrestore(&pdomain->lock, lflags); in amd_iommu_read_and_clear_dirty()
2726 return ops->read_and_clear_dirty(ops, iova, size, flags, dirty); in amd_iommu_read_and_clear_dirty()
2734 struct amd_iommu *iommu; in amd_iommu_get_resv_regions() local
2743 iommu = get_amd_iommu_from_dev(dev); in amd_iommu_get_resv_regions()
2744 pci_seg = iommu->pci_seg; in amd_iommu_get_resv_regions()
2746 list_for_each_entry(entry, &pci_seg->unity_map, list) { in amd_iommu_get_resv_regions()
2750 if (devid < entry->devid_start || devid > entry->devid_end) in amd_iommu_get_resv_regions()
2754 length = entry->address_end - entry->address_start; in amd_iommu_get_resv_regions()
2755 if (entry->prot & IOMMU_PROT_IR) in amd_iommu_get_resv_regions()
2757 if (entry->prot & IOMMU_PROT_IW) in amd_iommu_get_resv_regions()
2759 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE) in amd_iommu_get_resv_regions()
2763 region = iommu_alloc_resv_region(entry->address_start, in amd_iommu_get_resv_regions()
2767 dev_err(dev, "Out of memory allocating dm-regions\n"); in amd_iommu_get_resv_regions()
2770 		list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2774 MSI_RANGE_END - MSI_RANGE_START + 1, in amd_iommu_get_resv_regions()
2778 	list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2781 HT_RANGE_END - HT_RANGE_START + 1, in amd_iommu_get_resv_regions()
2785 	list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2792 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
2800 spin_lock_irqsave(&dom->lock, flags); in amd_iommu_flush_iotlb_all()
2802 spin_unlock_irqrestore(&dom->lock, flags); in amd_iommu_flush_iotlb_all()
2811 spin_lock_irqsave(&dom->lock, flags); in amd_iommu_iotlb_sync()
2812 amd_iommu_domain_flush_pages(dom, gather->start, in amd_iommu_iotlb_sync()
2813 gather->end - gather->start + 1); in amd_iommu_iotlb_sync()
2814 spin_unlock_irqrestore(&dom->lock, flags); in amd_iommu_iotlb_sync()
2826 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) in amd_iommu_def_domain_type()
2831 * - memory encryption is active, because some of those devices in amd_iommu_def_domain_type()
2832 * (AMD GPUs) don't have the encryption bit in their DMA-mask in amd_iommu_def_domain_type()
2834 * - SNP is enabled, because it prohibits DTE[Mode]=0. in amd_iommu_def_domain_type()
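The two bullets above gate the identity-map decision; a hedged reconstruction of the surrounding check (the condition helpers are drawn from elsewhere in the driver and may not match this version exactly):

	/* Sketch: PASID-capable devices are identity mapped only when neither
	 * memory encryption nor SNP forces a translated domain.
	 */
	if (pdev_pasid_supported(dev_data) &&
	    !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
	    !amd_iommu_snp_en)
		return IOMMU_DOMAIN_IDENTITY;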
2866 ret = -EINVAL; in amd_iommu_dev_enable_feature()
2882 ret = -EINVAL; in amd_iommu_dev_disable_feature()
2930 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt_and_complete() argument
2937 if (iommu->irtcachedis_enabled) in iommu_flush_irt_and_complete()
2941 data = atomic64_inc_return(&iommu->cmd_sem_val); in iommu_flush_irt_and_complete()
2942 build_completion_wait(&cmd2, iommu, data); in iommu_flush_irt_and_complete()
2944 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_flush_irt_and_complete()
2945 ret = __iommu_queue_command_sync(iommu, &cmd, true); in iommu_flush_irt_and_complete()
2948 ret = __iommu_queue_command_sync(iommu, &cmd2, false); in iommu_flush_irt_and_complete()
2951 wait_on_sem(iommu, data); in iommu_flush_irt_and_complete()
2953 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_flush_irt_and_complete()
2956 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid, in set_dte_irq_entry() argument
2960 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dte_irq_entry()
2964 dte |= iommu_virt_to_phys(table->table); in set_dte_irq_entry()
2972 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid) in get_irq_table() argument
2975 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_irq_table()
2977 if (WARN_ONCE(!pci_seg->rlookup_table[devid], in get_irq_table()
2978 "%s: no iommu for devid %x:%x\n", in get_irq_table()
2979 __func__, pci_seg->id, devid)) in get_irq_table()
2982 table = pci_seg->irq_lookup_table[devid]; in get_irq_table()
2984 __func__, pci_seg->id, devid)) in get_irq_table()
2998 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL); in __alloc_irq_table()
2999 if (!table->table) { in __alloc_irq_table()
3003 raw_spin_lock_init(&table->lock); in __alloc_irq_table()
3006 memset(table->table, 0, in __alloc_irq_table()
3009 memset(table->table, 0, in __alloc_irq_table()
3014 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, in set_remap_table_entry() argument
3017 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in set_remap_table_entry()
3019 pci_seg->irq_lookup_table[devid] = table; in set_remap_table_entry()
3020 set_dte_irq_entry(iommu, devid, table); in set_remap_table_entry()
3021 iommu_flush_dte(iommu, devid); in set_remap_table_entry()
3029 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev); in set_remap_table_entry_alias() local
3031 if (!iommu) in set_remap_table_entry_alias()
3032 return -EINVAL; in set_remap_table_entry_alias()
3034 pci_seg = iommu->pci_seg; in set_remap_table_entry_alias()
3035 pci_seg->irq_lookup_table[alias] = table; in set_remap_table_entry_alias()
3036 set_dte_irq_entry(iommu, alias, table); in set_remap_table_entry_alias()
3037 iommu_flush_dte(pci_seg->rlookup_table[alias], alias); in set_remap_table_entry_alias()
3042 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu, in alloc_irq_table() argument
3053 pci_seg = iommu->pci_seg; in alloc_irq_table()
3054 table = pci_seg->irq_lookup_table[devid]; in alloc_irq_table()
3058 alias = pci_seg->alias_table[devid]; in alloc_irq_table()
3059 table = pci_seg->irq_lookup_table[alias]; in alloc_irq_table()
3061 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3073 table = pci_seg->irq_lookup_table[devid]; in alloc_irq_table()
3077 table = pci_seg->irq_lookup_table[alias]; in alloc_irq_table()
3079 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3090 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3093 set_remap_table_entry(iommu, alias, table); in alloc_irq_table()
3096 iommu_completion_wait(iommu); in alloc_irq_table()
3102 kmem_cache_free(amd_iommu_irq_cache, new_table->table); in alloc_irq_table()
3108 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count, in alloc_irq_index() argument
3115 table = alloc_irq_table(iommu, devid, pdev); in alloc_irq_index()
3117 return -ENODEV; in alloc_irq_index()
3122 raw_spin_lock_irqsave(&table->lock, flags); in alloc_irq_index()
3125 for (index = ALIGN(table->min_index, alignment), c = 0; in alloc_irq_index()
3127 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
3136 for (; c != 0; --c) in alloc_irq_index()
3137 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
3139 index -= count - 1; in alloc_irq_index()
3146 index = -ENOSPC; in alloc_irq_index()
3149 raw_spin_unlock_irqrestore(&table->lock, flags); in alloc_irq_index()
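The index bookkeeping in the allocation loop above is subtle; a worked example with assumed numbers:

	/*
	 * Illustrative: count = 4 aligned MSI entries, table->min_index = 32.
	 * The scan starts at ALIGN(32, 4) = 32; once entries 32..35 are seen
	 * free, 'c' reaches 4 and the marking loop sets index - c + 1 .. index
	 * (i.e. 32..35) allocated.  'index -= count - 1' then rewinds the
	 * return value from the last slot (35) to the first (32).
	 */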
3154 static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, in __modify_irte_ga() argument
3162 table = get_irq_table(iommu, devid); in __modify_irte_ga()
3164 return -ENOMEM; in __modify_irte_ga()
3166 raw_spin_lock_irqsave(&table->lock, flags); in __modify_irte_ga()
3168 entry = (struct irte_ga *)table->table; in __modify_irte_ga()
3172 * We use cmpxchg16 to atomically update the 128-bit IRTE, in __modify_irte_ga()
3177 old = entry->irte; in __modify_irte_ga()
3178 WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte)); in __modify_irte_ga()
3180 raw_spin_unlock_irqrestore(&table->lock, flags); in __modify_irte_ga()
3185 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, in modify_irte_ga() argument
3190 ret = __modify_irte_ga(iommu, devid, index, irte); in modify_irte_ga()
3194 iommu_flush_irt_and_complete(iommu, devid); in modify_irte_ga()
3199 static int modify_irte(struct amd_iommu *iommu, in modify_irte() argument
3205 table = get_irq_table(iommu, devid); in modify_irte()
3207 return -ENOMEM; in modify_irte()
3209 raw_spin_lock_irqsave(&table->lock, flags); in modify_irte()
3210 table->table[index] = irte->val; in modify_irte()
3211 raw_spin_unlock_irqrestore(&table->lock, flags); in modify_irte()
3213 iommu_flush_irt_and_complete(iommu, devid); in modify_irte()
3218 static void free_irte(struct amd_iommu *iommu, u16 devid, int index) in free_irte() argument
3223 table = get_irq_table(iommu, devid); in free_irte()
3227 raw_spin_lock_irqsave(&table->lock, flags); in free_irte()
3228 iommu->irte_ops->clear_allocated(table, index); in free_irte()
3229 raw_spin_unlock_irqrestore(&table->lock, flags); in free_irte()
3231 iommu_flush_irt_and_complete(iommu, devid); in free_irte()
3240 irte->val = 0; in irte_prepare()
3241 irte->fields.vector = vector; in irte_prepare()
3242 irte->fields.int_type = delivery_mode; in irte_prepare()
3243 irte->fields.destination = dest_apicid; in irte_prepare()
3244 irte->fields.dm = dest_mode; in irte_prepare()
3245 irte->fields.valid = 1; in irte_prepare()
3254 irte->lo.val = 0; in irte_ga_prepare()
3255 irte->hi.val = 0; in irte_ga_prepare()
3256 irte->lo.fields_remap.int_type = delivery_mode; in irte_ga_prepare()
3257 irte->lo.fields_remap.dm = dest_mode; in irte_ga_prepare()
3258 irte->hi.fields.vector = vector; in irte_ga_prepare()
3259 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid); in irte_ga_prepare()
3260 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid); in irte_ga_prepare()
3261 irte->lo.fields_remap.valid = 1; in irte_ga_prepare()
3264 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_activate() argument
3268 irte->fields.valid = 1; in irte_activate()
3269 modify_irte(iommu, devid, index, irte); in irte_activate()
3272 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_activate() argument
3276 irte->lo.fields_remap.valid = 1; in irte_ga_activate()
3277 modify_irte_ga(iommu, devid, index, irte); in irte_ga_activate()
3280 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_deactivate() argument
3284 irte->fields.valid = 0; in irte_deactivate()
3285 modify_irte(iommu, devid, index, irte); in irte_deactivate()
3288 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_deactivate() argument
3292 irte->lo.fields_remap.valid = 0; in irte_ga_deactivate()
3293 modify_irte_ga(iommu, devid, index, irte); in irte_ga_deactivate()
3296 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_set_affinity() argument
3301 irte->fields.vector = vector; in irte_set_affinity()
3302 irte->fields.destination = dest_apicid; in irte_set_affinity()
3303 modify_irte(iommu, devid, index, irte); in irte_set_affinity()
3306 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_ga_set_affinity() argument
3311 if (!irte->lo.fields_remap.guest_mode) { in irte_ga_set_affinity()
3312 irte->hi.fields.vector = vector; in irte_ga_set_affinity()
3313 irte->lo.fields_remap.destination = in irte_ga_set_affinity()
3315 irte->hi.fields.destination = in irte_ga_set_affinity()
3317 modify_irte_ga(iommu, devid, index, irte); in irte_ga_set_affinity()
3324 table->table[index] = IRTE_ALLOCATED; in irte_set_allocated()
3329 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_set_allocated()
3332 memset(&irte->lo.val, 0, sizeof(u64)); in irte_ga_set_allocated()
3333 memset(&irte->hi.val, 0, sizeof(u64)); in irte_ga_set_allocated()
3334 irte->hi.fields.vector = 0xff; in irte_ga_set_allocated()
3339 union irte *ptr = (union irte *)table->table; in irte_is_allocated()
3342 return irte->val != 0; in irte_is_allocated()
3347 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_is_allocated()
3350 return irte->hi.fields.vector != 0; in irte_ga_is_allocated()
3355 table->table[index] = 0; in irte_clear_allocated()
3360 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_clear_allocated()
3363 memset(&irte->lo.val, 0, sizeof(u64)); in irte_ga_clear_allocated()
3364 memset(&irte->hi.val, 0, sizeof(u64)); in irte_ga_clear_allocated()
3369 switch (info->type) { in get_devid()
3371 return get_ioapic_devid(info->devid); in get_devid()
3373 return get_hpet_devid(info->devid); in get_devid()
3376 return get_device_sbdf_id(msi_desc_to_dev(info->desc)); in get_devid()
3379 return -1; in get_devid()
3393 msg->data = index; in fill_msi_msg()
3394 msg->address_lo = 0; in fill_msi_msg()
3395 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW; in fill_msi_msg()
3396 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH; in fill_msi_msg()
3404 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_prepare_irte()
3405 struct amd_iommu *iommu = data->iommu; in irq_remapping_prepare_irte() local
3407 if (!iommu) in irq_remapping_prepare_irte()
3410 data->irq_2_irte.devid = devid; in irq_remapping_prepare_irte()
3411 data->irq_2_irte.index = index + sub_handle; in irq_remapping_prepare_irte()
3412 iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED, in irq_remapping_prepare_irte()
3413 apic->dest_mode_logical, irq_cfg->vector, in irq_remapping_prepare_irte()
3414 irq_cfg->dest_apicid, devid); in irq_remapping_prepare_irte()
3416 switch (info->type) { in irq_remapping_prepare_irte()
3421 fill_msi_msg(&data->msi_entry, irte_info->index); in irq_remapping_prepare_irte()
3456 struct amd_iommu *iommu; in irq_remapping_alloc() local
3462 return -EINVAL; in irq_remapping_alloc()
3463 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI) in irq_remapping_alloc()
3464 return -EINVAL; in irq_remapping_alloc()
3468 return -EINVAL; in irq_remapping_alloc()
3472 iommu = __rlookup_amd_iommu(seg, devid); in irq_remapping_alloc()
3473 if (!iommu) in irq_remapping_alloc()
3474 return -EINVAL; in irq_remapping_alloc()
3480 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) { in irq_remapping_alloc()
3483 table = alloc_irq_table(iommu, devid, NULL); in irq_remapping_alloc()
3485 if (!table->min_index) { in irq_remapping_alloc()
3490 table->min_index = 32; in irq_remapping_alloc()
3492 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3494 WARN_ON(table->min_index != 32); in irq_remapping_alloc()
3495 index = info->ioapic.pin; in irq_remapping_alloc()
3497 index = -ENOMEM; in irq_remapping_alloc()
3499 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI || in irq_remapping_alloc()
3500 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) { in irq_remapping_alloc()
3501 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI); in irq_remapping_alloc()
3503 index = alloc_irq_index(iommu, devid, nr_irqs, align, in irq_remapping_alloc()
3504 msi_desc_to_pci_dev(info->desc)); in irq_remapping_alloc()
3506 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL); in irq_remapping_alloc()
3519 ret = -EINVAL; in irq_remapping_alloc()
3523 ret = -ENOMEM; in irq_remapping_alloc()
3529 data->entry = kzalloc(sizeof(union irte), GFP_KERNEL); in irq_remapping_alloc()
3531 data->entry = kzalloc(sizeof(struct irte_ga), in irq_remapping_alloc()
3533 if (!data->entry) { in irq_remapping_alloc()
3538 data->iommu = iommu; in irq_remapping_alloc()
3539 irq_data->hwirq = (devid << 16) + i; in irq_remapping_alloc()
3540 irq_data->chip_data = data; in irq_remapping_alloc()
3541 irq_data->chip = &amd_ir_chip; in irq_remapping_alloc()
3549 for (i--; i >= 0; i--) { in irq_remapping_alloc()
3552 kfree(irq_data->chip_data); in irq_remapping_alloc()
3555 free_irte(iommu, devid, index + i); in irq_remapping_alloc()
3571 if (irq_data && irq_data->chip_data) { in irq_remapping_free()
3572 data = irq_data->chip_data; in irq_remapping_free()
3573 irte_info = &data->irq_2_irte; in irq_remapping_free()
3574 free_irte(data->iommu, irte_info->devid, irte_info->index); in irq_remapping_free()
3575 kfree(data->entry); in irq_remapping_free()
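/*
 * Annotation: the free path mirrors the allocation above; for each
 * interrupt it releases the IRTE slot with free_irte() and frees the
 * per-interrupt entry and chip data before the generic domain teardown.
 */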
3582 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3590 struct amd_ir_data *data = irq_data->chip_data; in irq_remapping_activate()
3591 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_activate()
3592 struct amd_iommu *iommu = data->iommu; in irq_remapping_activate() local
3595 if (!iommu) in irq_remapping_activate()
3598 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid, in irq_remapping_activate()
3599 irte_info->index); in irq_remapping_activate()
3600 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); in irq_remapping_activate()
3607 struct amd_ir_data *data = irq_data->chip_data; in irq_remapping_deactivate()
3608 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_deactivate()
3609 struct amd_iommu *iommu = data->iommu; in irq_remapping_deactivate() local
3611 if (iommu) in irq_remapping_deactivate()
3612 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid, in irq_remapping_deactivate()
3613 irte_info->index); in irq_remapping_deactivate()
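/*
 * Annotation: activate/deactivate toggle the hardware IRTE through the
 * per-variant irte_ops callbacks; activation additionally runs
 * amd_ir_update_irte() so the entry reflects the vector and destination
 * currently programmed in the irq_cfg.
 */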
3619 struct amd_iommu *iommu; in irq_remapping_select() local
3620 int devid = -1; in irq_remapping_select()
3626 devid = get_ioapic_devid(fwspec->param[0]); in irq_remapping_select()
3628 devid = get_hpet_devid(fwspec->param[0]); in irq_remapping_select()
3632 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff)); in irq_remapping_select()
3634 return iommu && iommu->ir_domain == d; in irq_remapping_select()
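/*
 * Annotation: irq_remapping_select() maps the firmware handle (IOAPIC or
 * HPET id) to a device id whose high 16 bits carry the PCI segment and
 * low 16 bits the devid, which is why the lookup above splits the value
 * with ">> 16" and "& 0xffff".  A sketch of that unpacking with
 * hypothetical names:
 */
static inline void example_unpack_sbdf(int sbdf, unsigned short *seg,
				       unsigned short *devid)
{
	*seg   = sbdf >> 16;
	*devid = sbdf & 0xffff;
}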
3648 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_activate_guest_mode()
3654 valid = entry->lo.fields_vapic.valid; in amd_iommu_activate_guest_mode()
3656 entry->lo.val = 0; in amd_iommu_activate_guest_mode()
3657 entry->hi.val = 0; in amd_iommu_activate_guest_mode()
3659 entry->lo.fields_vapic.valid = valid; in amd_iommu_activate_guest_mode()
3660 entry->lo.fields_vapic.guest_mode = 1; in amd_iommu_activate_guest_mode()
3661 entry->lo.fields_vapic.ga_log_intr = 1; in amd_iommu_activate_guest_mode()
3662 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr; in amd_iommu_activate_guest_mode()
3663 entry->hi.fields.vector = ir_data->ga_vector; in amd_iommu_activate_guest_mode()
3664 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; in amd_iommu_activate_guest_mode()
3666 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_activate_guest_mode()
3667 ir_data->irq_2_irte.index, entry); in amd_iommu_activate_guest_mode()
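/*
 * Annotation: amd_iommu_activate_guest_mode() rewrites the IRTE for
 * vAPIC/posted-interrupt delivery: it preserves only the valid bit, sets
 * guest_mode and ga_log_intr, installs the guest's vAPIC backing-page root
 * pointer, guest vector and GA tag, and pushes the result to hardware with
 * modify_irte_ga().  With guest_mode set, the IOMMU posts the interrupt to
 * the guest's virtual APIC instead of remapping it to a host vector.
 */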
3674 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_deactivate_guest_mode()
3675 struct irq_cfg *cfg = ir_data->cfg; in amd_iommu_deactivate_guest_mode()
3679 !entry || !entry->lo.fields_vapic.guest_mode) in amd_iommu_deactivate_guest_mode()
3682 valid = entry->lo.fields_remap.valid; in amd_iommu_deactivate_guest_mode()
3684 entry->lo.val = 0; in amd_iommu_deactivate_guest_mode()
3685 entry->hi.val = 0; in amd_iommu_deactivate_guest_mode()
3687 entry->lo.fields_remap.valid = valid; in amd_iommu_deactivate_guest_mode()
3688 entry->lo.fields_remap.dm = apic->dest_mode_logical; in amd_iommu_deactivate_guest_mode()
3689 entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED; in amd_iommu_deactivate_guest_mode()
3690 entry->hi.fields.vector = cfg->vector; in amd_iommu_deactivate_guest_mode()
3691 entry->lo.fields_remap.destination = in amd_iommu_deactivate_guest_mode()
3692 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); in amd_iommu_deactivate_guest_mode()
3693 entry->hi.fields.destination = in amd_iommu_deactivate_guest_mode()
3694 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); in amd_iommu_deactivate_guest_mode()
3696 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_deactivate_guest_mode()
3697 ir_data->irq_2_irte.index, entry); in amd_iommu_deactivate_guest_mode()
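/*
 * Annotation: the reverse transition clears guest mode and rebuilds a plain
 * remapped entry from the host irq_cfg: fixed delivery, the current APIC
 * destination mode, and the host vector/APIC ID split across the lo/hi
 * destination fields, again flushed with modify_irte_ga().
 */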
3705 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; in amd_ir_set_vcpu_affinity()
3706 struct amd_ir_data *ir_data = data->chip_data; in amd_ir_set_vcpu_affinity()
3707 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; in amd_ir_set_vcpu_affinity()
3710 if (ir_data->iommu == NULL) in amd_ir_set_vcpu_affinity()
3711 return -EINVAL; in amd_ir_set_vcpu_affinity()
3713 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3719 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()
3722 ir_data->cfg = irqd_cfg(data); in amd_ir_set_vcpu_affinity()
3723 pi_data->ir_data = ir_data; in amd_ir_set_vcpu_affinity()
3732 pi_data->is_guest_mode = false; in amd_ir_set_vcpu_affinity()
3735 pi_data->prev_ga_tag = ir_data->cached_ga_tag; in amd_ir_set_vcpu_affinity()
3736 if (pi_data->is_guest_mode) { in amd_ir_set_vcpu_affinity()
3737 ir_data->ga_root_ptr = (pi_data->base >> 12); in amd_ir_set_vcpu_affinity()
3738 ir_data->ga_vector = vcpu_pi_info->vector; in amd_ir_set_vcpu_affinity()
3739 ir_data->ga_tag = pi_data->ga_tag; in amd_ir_set_vcpu_affinity()
3742 ir_data->cached_ga_tag = pi_data->ga_tag; in amd_ir_set_vcpu_affinity()
3751 ir_data->cached_ga_tag = 0; in amd_ir_set_vcpu_affinity()
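/*
 * Annotation: amd_ir_set_vcpu_affinity() is the hook used by the
 * posted-interrupt code: it only acts when the device has vAPIC enabled
 * (dev_data->use_vapic), reports the previously cached GA tag back through
 * pi_data->prev_ga_tag, and then either activates guest mode with the new
 * vCPU's backing page/vector/tag or falls back to host remapping, caching
 * the GA tag (or 0) for the next update.
 */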
3758 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, in amd_ir_update_irte() argument
3768 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid, in amd_ir_update_irte()
3769 irte_info->index, cfg->vector, in amd_ir_update_irte()
3770 cfg->dest_apicid); in amd_ir_update_irte()
3776 struct amd_ir_data *ir_data = data->chip_data; in amd_ir_set_affinity()
3777 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; in amd_ir_set_affinity()
3779 struct irq_data *parent = data->parent_data; in amd_ir_set_affinity()
3780 struct amd_iommu *iommu = ir_data->iommu; in amd_ir_set_affinity() local
3783 if (!iommu) in amd_ir_set_affinity()
3784 return -ENODEV; in amd_ir_set_affinity()
3786 ret = parent->chip->irq_set_affinity(parent, mask, force); in amd_ir_set_affinity()
3790 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); in amd_ir_set_affinity()
3803 struct amd_ir_data *ir_data = irq_data->chip_data; in ir_compose_msi_msg()
3805 *msg = ir_data->msi_entry; in ir_compose_msi_msg()
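/*
 * Annotation: affinity changes go to the parent vector domain first (which
 * picks the new vector/CPU) and are then mirrored into the IRTE via
 * irte_ops->set_affinity(); the device itself is never reprogrammed, which
 * is why ir_compose_msi_msg() can simply return the MSI message pre-built
 * around the IRTE index at allocation time.
 */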
3809 .name = "AMD-IR",
3818 .prefix = "IR-",
3822 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
3826 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
3828 return -ENOMEM; in amd_iommu_create_irq_domain()
3829 iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0, in amd_iommu_create_irq_domain()
3830 fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
3831 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
3833 return -ENOMEM; in amd_iommu_create_irq_domain()
3836 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI); in amd_iommu_create_irq_domain()
3837 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT | in amd_iommu_create_irq_domain()
3839 iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops; in amd_iommu_create_irq_domain()
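/*
 * Annotation: each IOMMU gets its own "AMD-IR" irq domain stacked on the
 * x86 vector parent; tagging it DOMAIN_BUS_AMDVI and exposing it as an MSI
 * parent (with the "IR-" prefixed msi_parent_ops above) lets per-device
 * MSI domains allocate their interrupts through the remapping layer.
 */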
3847 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_update_ga()
3850 !entry || !entry->lo.fields_vapic.guest_mode) in amd_iommu_update_ga()
3853 if (!ir_data->iommu) in amd_iommu_update_ga()
3854 return -ENODEV; in amd_iommu_update_ga()
3857 entry->lo.fields_vapic.destination = in amd_iommu_update_ga()
3859 entry->hi.fields.destination = in amd_iommu_update_ga()
3862 entry->lo.fields_vapic.is_run = is_run; in amd_iommu_update_ga()
3864 return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_update_ga()
3865 ir_data->irq_2_irte.index, entry); in amd_iommu_update_ga()
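/*
 * Annotation: amd_iommu_update_ga() is the lightweight path used when a
 * vCPU backing a posted interrupt is scheduled in/out or migrates: for an
 * IRTE already in guest mode it only refreshes the destination fields and
 * the is_run hint, via the __modify_irte_ga() variant, which (as the
 * naming suggests) skips the heavier flush done by modify_irte_ga().
 */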