iommu.c: d2f85a263883b679f87ed8f911746105658e9c47 -> 06c375053cefc3a2f383d200596abe5ab3fb35f9
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright © 2006-2014 Intel Corporation.
4 *
5 * Authors: David Woodhouse <dwmw2@infradead.org>,
6 * Ashok Raj <ashok.raj@intel.com>,
7 * Shaohua Li <shaohua.li@intel.com>,
8 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,

--- 13 unchanged lines hidden ---

22#include <linux/spinlock.h>
23#include <linux/syscore_ops.h>
24#include <linux/tboot.h>
25#include <uapi/linux/iommufd.h>
26
27#include "iommu.h"
28#include "../dma-iommu.h"
29#include "../irq_remapping.h"
30#include "../iommu-pages.h"
30#include "pasid.h"
31#include "cap_audit.h"
32#include "perfmon.h"
33
34#define ROOT_SIZE VTD_PAGE_SIZE
35#define CONTEXT_SIZE VTD_PAGE_SIZE
36
37#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)

--- 255 unchanged lines hidden (view full) ---

293 while (*str == ',')
294 str++;
295 }
296
297 return 1;
298}
299__setup("intel_iommu=", intel_iommu_setup);
300
301void *alloc_pgtable_page(int node, gfp_t gfp)
302{
303 struct page *page;
304 void *vaddr = NULL;
305
306 page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
307 if (page)
308 vaddr = page_address(page);
309 return vaddr;
310}
311
312void free_pgtable_page(void *vaddr)
313{
314 free_page((unsigned long)vaddr);
315}
316
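The two helpers above exist only on the old side of this comparison; every later hunk switches their callers to the shared page allocator brought in by the new "../iommu-pages.h" include. A minimal sketch of the substitution, using only the call forms visible in the hunks below (the sketch_* wrappers are hypothetical; the exact prototypes in iommu-pages.h are assumed from those call sites, not verified here):

        /* Sketch only, not part of the diff: allocate and free one zeroed
         * page-table page on a given NUMA node, old helper vs. new helper. */
        static void *sketch_alloc_table_page(int node)
        {
                /* was: return alloc_pgtable_page(node, GFP_ATOMIC); */
                return iommu_alloc_page_node(node, GFP_ATOMIC);
        }

        static void sketch_free_table_page(void *vaddr)
        {
                /* was: free_pgtable_page(vaddr); */
                iommu_free_page(vaddr);
        }

The list-based variant changes the same way: put_pages_list(&freelist) becomes iommu_put_pages_list(&freelist) in the domain_exit(), memory-notifier, and gather->freelist hunks further down.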
317static int domain_type_is_si(struct dmar_domain *domain)
318{
319 return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
320}
321
322static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
323{
324 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

--- 215 unchanged lines hidden ---

540 }
541 if (*entry & 1)
542 context = phys_to_virt(*entry & VTD_PAGE_MASK);
543 else {
544 unsigned long phy_addr;
545 if (!alloc)
546 return NULL;
547
548 context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
533 context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
549 if (!context)
550 return NULL;
551
552 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
553 phy_addr = virt_to_phys((void *)context);
554 *entry = phy_addr | 1;
555 __iommu_flush_cache(iommu, entry, sizeof(*entry));
556 }

--- 157 unchanged lines hidden ---

714 int i;
715
716 if (!iommu->root_entry)
717 return;
718
719 for (i = 0; i < ROOT_ENTRY_NR; i++) {
720 context = iommu_context_addr(iommu, i, 0, 0);
721 if (context)
722 free_pgtable_page(context);
707 iommu_free_page(context);
723
724 if (!sm_supported(iommu))
725 continue;
726
727 context = iommu_context_addr(iommu, i, 0x80, 0);
728 if (context)
729 free_pgtable_page(context);
714 iommu_free_page(context);
730 }
731
732 free_pgtable_page(iommu->root_entry);
717 iommu_free_page(iommu->root_entry);
733 iommu->root_entry = NULL;
734}
735
736#ifdef CONFIG_DMAR_DEBUG
737static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
738 u8 bus, u8 devfn, struct dma_pte *parent, int level)
739{
740 struct dma_pte *pte;

--- 121 unchanged lines hidden ---

862 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
863 break;
864 if (level == *target_level)
865 break;
866
867 if (!dma_pte_present(pte)) {
868 uint64_t pteval;
869
870 tmp_page = alloc_pgtable_page(domain->nid, gfp);
855 tmp_page = iommu_alloc_page_node(domain->nid, gfp);
871
872 if (!tmp_page)
873 return NULL;
874
875 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
876 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
877 if (domain->use_first_level)
878 pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
879
880 if (cmpxchg64(&pte->val, 0ULL, pteval))
881 /* Someone else set it while we were thinking; use theirs. */
882 free_pgtable_page(tmp_page);
867 iommu_free_page(tmp_page);
883 else
884 domain_flush_cache(domain, pte, sizeof(*pte));
885 }
886 if (level == 1)
887 break;
888
889 parent = phys_to_virt(dma_pte_addr(pte));
890 level--;

--- 96 unchanged lines hidden ---

987 /*
988 * Free the page table if we're below the level we want to
989 * retain and the range covers the entire table.
990 */
991 if (level < retain_level && !(start_pfn > level_pfn ||
992 last_pfn < level_pfn + level_size(level) - 1)) {
993 dma_clear_pte(pte);
994 domain_flush_cache(domain, pte, sizeof(*pte));
995 free_pgtable_page(level_pte);
980 iommu_free_page(level_pte);
996 }
997next:
998 pfn += level_size(level);
999 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1000}
1001
1002/*
1003 * clear last level (leaf) ptes and free page table pages below the

--- 7 unchanged lines hidden ---

1011 dma_pte_clear_range(domain, start_pfn, last_pfn);
1012
1013 /* We don't need lock here; nobody else touches the iova range */
1014 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1015 domain->pgd, 0, start_pfn, last_pfn);
1016
1017 /* free pgd */
1018 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1019 free_pgtable_page(domain->pgd);
1004 iommu_free_page(domain->pgd);
1020 domain->pgd = NULL;
1021 }
1022}
1023
1024/* When a page at a given level is being unlinked from its parent, we don't
1025 need to *modify* it at all. All we need to do is make a list of all the
1026 pages which can be freed just as soon as we've flushed the IOTLB and we
1027 know the hardware page-walk will no longer touch them.

--- 85 unchanged lines hidden ---

1113 }
1114}
1115
1116/* iommu handling */
1117static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1118{
1119 struct root_entry *root;
1120
1121 root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
1106 root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
1122 if (!root) {
1123 pr_err("Allocating root entry for %s failed\n",
1124 iommu->name);
1125 return -ENOMEM;
1126 }
1127
1128 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1129 iommu->root_entry = root;

--- 706 unchanged lines hidden ---

1836}
1837
1838static void domain_exit(struct dmar_domain *domain)
1839{
1840 if (domain->pgd) {
1841 LIST_HEAD(freelist);
1842
1843 domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
1844 put_pages_list(&freelist);
1829 iommu_put_pages_list(&freelist);
1845 }
1846
1847 if (WARN_ON(!list_empty(&domain->devices)))
1848 return;
1849
1850 kfree(domain);
1851}
1852
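domain_exit() above keeps the freelist scheme that the earlier "When a page at a given level is being unlinked from its parent..." comment describes: page-table pages are gathered on a list while the hardware page walk may still reference them, and only released once that can no longer happen; this diff merely swaps the final call to the shared helper. A condensed sketch of the pattern as it appears in these hunks (the wrapper name is hypothetical; whether a caller must flush the IOTLB before releasing the list depends on the call site, as the memory-notifier hunk further down shows by flushing each active IOMMU first):

        /* Sketch only, not part of the diff: gather, then release in one go. */
        static void sketch_unmap_range(struct dmar_domain *domain,
                                       unsigned long start_pfn, unsigned long last_pfn)
        {
                LIST_HEAD(freelist);

                domain_unmap(domain, start_pfn, last_pfn, &freelist);
                /* IOTLB flush for the range would go here when the domain is live. */
                /* was: put_pages_list(&freelist); */
                iommu_put_pages_list(&freelist);
        }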

--- 639 unchanged lines hidden ---

2492 }
2493
2494 ret = -ENOMEM;
2495 old_ce = memremap(old_ce_phys, PAGE_SIZE,
2496 MEMREMAP_WB);
2497 if (!old_ce)
2498 goto out;
2499
2500 new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL);
2485 new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
2501 if (!new_ce)
2502 goto out_unmap;
2503
2504 ret = 0;
2505 }
2506
2507 /* Now copy the context entry */
2508 memcpy(&ce, old_ce + idx, sizeof(ce));

--- 912 unchanged lines hidden ---

3421 domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
3422
3423 rcu_read_lock();
3424 for_each_active_iommu(iommu, drhd)
3425 iommu_flush_iotlb_psi(iommu, si_domain,
3426 start_vpfn, mhp->nr_pages,
3427 list_empty(&freelist), 0);
3428 rcu_read_unlock();
3429 put_pages_list(&freelist);
3414 iommu_put_pages_list(&freelist);
3430 }
3431 break;
3432 }
3433
3434 return NOTIFY_OK;
3435}
3436
3437static struct notifier_block intel_iommu_memory_nb = {

--- 390 unchanged lines hidden ---

3828 adjust_width = guestwidth_to_adjustwidth(guest_width);
3829 domain->agaw = width_to_agaw(adjust_width);
3830
3831 domain->iommu_coherency = false;
3832 domain->iommu_superpage = 0;
3833 domain->max_addr = 0;
3834
3835 /* always allocate the top pgd */
3836 domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
3821 domain->pgd = iommu_alloc_page_node(domain->nid, GFP_ATOMIC);
3837 if (!domain->pgd)
3838 return -ENOMEM;
3839 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3840 return 0;
3841}
3842
3843static int blocking_domain_attach_dev(struct iommu_domain *domain,
3844 struct device *dev)

--- 137 unchanged lines hidden ---

3982 * Knock out extra levels of page tables if necessary
3983 */
3984 while (iommu->agaw < dmar_domain->agaw) {
3985 struct dma_pte *pte;
3986
3987 pte = dmar_domain->pgd;
3988 if (dma_pte_present(pte)) {
3989 dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
3990 free_pgtable_page(pte);
3975 iommu_free_page(pte);
3991 }
3992 dmar_domain->agaw--;
3993 }
3994
3995 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
3996 context_copied(iommu, info->bus, info->devfn))
3997 return intel_pasid_setup_sm_context(dev);
3998

--- 137 unchanged lines hidden ---

4136 xa_for_each(&dmar_domain->iommu_array, i, info)
4137 iommu_flush_iotlb_psi(info->iommu, dmar_domain,
4138 start_pfn, nrpages,
4139 list_empty(&gather->freelist), 0);
4140
4141 if (dmar_domain->nested_parent)
4142 parent_domain_flush(dmar_domain, start_pfn, nrpages,
4143 list_empty(&gather->freelist));
4144 put_pages_list(&gather->freelist);
4129 iommu_put_pages_list(&gather->freelist);
4145}
4146
4147static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4148 dma_addr_t iova)
4149{
4150 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4151 struct dma_pte *pte;
4152 int level = 0;

--- 141 unchanged lines hidden ---

4294
4295 if (info->ats_supported && ecap_prs(iommu->ecap) &&
4296 pci_pri_supported(pdev))
4297 info->pri_supported = 1;
4298 }
4299 }
4300
4301 dev_iommu_priv_set(dev, info);
4302 ret = device_rbtree_insert(iommu, info);
4303 if (ret)
4304 goto free;
4287 if (pdev && pci_ats_supported(pdev)) {
4288 ret = device_rbtree_insert(iommu, info);
4289 if (ret)
4290 goto free;
4291 }
4305
4306 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
4307 ret = intel_pasid_alloc_table(dev);
4308 if (ret) {
4309 dev_err(dev, "PASID table allocation failed\n");
4310 goto clear_rbtree;
4311 }
4312

--- 18 unchanged lines hidden ---

4331}
4332
4333static void intel_iommu_release_device(struct device *dev)
4334{
4335 struct device_domain_info *info = dev_iommu_priv_get(dev);
4336 struct intel_iommu *iommu = info->iommu;
4337
4338 mutex_lock(&iommu->iopf_lock);
4339 device_rbtree_remove(info);
4326 if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
4327 device_rbtree_remove(info);
4340 mutex_unlock(&iommu->iopf_lock);
4341
4342 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
4343 !context_copied(iommu, info->bus, info->devfn))
4344 intel_pasid_teardown_sm_context(dev);
4345
4346 intel_pasid_free_table(dev);
4347 intel_iommu_debugfs_remove_dev(info);

--- 234 unchanged lines hidden (view full) ---

4582 struct iommu_domain_info *info;
4583 unsigned long i;
4584
4585 xa_for_each(&dmar_domain->iommu_array, i, info)
4586 __mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
4587 return 0;
4588}
4589
4590static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
4591 struct iommu_domain *domain)
4578static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
4592{
4593 struct device_domain_info *info = dev_iommu_priv_get(dev);
4594 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4595 struct dev_pasid_info *curr, *dev_pasid = NULL;
4596 struct intel_iommu *iommu = info->iommu;
4581 struct dev_pasid_info *curr, *dev_pasid = NULL;
4582 struct intel_iommu *iommu = info->iommu;
4583 struct dmar_domain *dmar_domain;
4584 struct iommu_domain *domain;
4597 unsigned long flags;
4598
4587 domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
4588 if (WARN_ON_ONCE(!domain))
4589 goto out_tear_down;
4590
4599 /*
4600 * The SVA implementation needs to handle its own stuffs like the mm
4601 * notification. Before consolidating that code into iommu core, let
4602 * the intel sva code handle it.
4603 */
4604 if (domain->type == IOMMU_DOMAIN_SVA) {
4605 intel_svm_remove_dev_pasid(dev, pasid);
4606 goto out_tear_down;
4607 }
4608
4601 dmar_domain = to_dmar_domain(domain);
4609 spin_lock_irqsave(&dmar_domain->lock, flags);
4610 list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
4611 if (curr->dev == dev && curr->pasid == pasid) {
4612 list_del(&curr->link_domain);
4613 dev_pasid = curr;
4614 break;
4615 }
4616 }

--- 536 unchanged lines hidden ---