// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <linux/xarray.h>
#include <linux/ioasid.h>
#include <asm/page.h>
#include <asm/fpu/api.h>
#include <trace/events/intel_iommu.h>

#include "pasid.h"
#include "perf.h"
#include "../iommu-sva-lib.h"

static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);
#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)

static DEFINE_XARRAY_ALLOC(pasid_private_array);
static int pasid_private_add(ioasid_t pasid, void *priv)
{
	return xa_alloc(&pasid_private_array, &pasid, priv,
			XA_LIMIT(pasid, pasid), GFP_ATOMIC);
}

static void pasid_private_remove(ioasid_t pasid)
{
	xa_erase(&pasid_private_array, pasid);
}

static void *pasid_private_find(ioasid_t pasid)
{
	return xa_load(&pasid_private_array, pasid);
}

static struct intel_svm_dev *
svm_lookup_device_by_sid(struct intel_svm *svm, u16 sid)
{
	struct intel_svm_dev *sdev = NULL, *t;

	rcu_read_lock();
	list_for_each_entry_rcu(t, &svm->devs, list) {
		if (t->sid == sid) {
			sdev = t;
			break;
		}
	}
	rcu_read_unlock();

	return sdev;
}

static struct intel_svm_dev *
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
{
	struct intel_svm_dev *sdev = NULL, *t;

	rcu_read_lock();
	list_for_each_entry_rcu(t, &svm->devs, list) {
		if (t->dev == dev) {
			sdev = t;
			break;
		}
	}
	rcu_read_unlock();

	return sdev;
}
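/*
 * Set up the page request queue (PRQ) for an IOMMU unit: allocate the
 * queue pages, wire up a threaded interrupt handler, create the I/O page
 * fault (iopf) queue, and program the queue head/tail/address registers.
 * The error paths unwind in the reverse order of setup.
 */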
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct iopf_queue *iopfq;
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
		goto free_prq;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
		 "dmar%d-iopfq", iommu->seq_id);
	iopfq = iopf_queue_alloc(iommu->iopfq_name);
	if (!iopfq) {
		pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
		ret = -ENOMEM;
		goto free_hwirq;
	}
	iommu->iopf_queue = iopfq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		goto free_iopfq;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	init_completion(&iommu->prq_complete);

	return 0;

free_iopfq:
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
free_hwirq:
	dmar_free_hwirq(irq);
	iommu->pr_irq = 0;
free_prq:
	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return ret;
}

int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	if (iommu->iopf_queue) {
		iopf_queue_free(iommu->iopf_queue);
		iommu->iopf_queue = NULL;
	}

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}
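/*
 * SVM shares CPU page tables with the IOMMU, so the IOMMU must be able
 * to walk whatever the CPU can build: 1GB pages when the CPU has
 * X86_FEATURE_GBPAGES, and 5-level paging when LA57 is enabled. If
 * either capability is missing, SVM stays disabled on this IOMMU.
 */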
static inline bool intel_svm_capable(struct intel_iommu *iommu)
{
	return iommu->flags & VTD_FLAG_SVM_CAPABLE;
}

void intel_svm_check(struct intel_iommu *iommu)
{
	if (!pasid_supported(iommu))
		return;

	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
	    !cap_fl1gp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible 1GB page capability\n",
		       iommu->name);
		return;
	}

	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    !cap_5lp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible paging mode\n",
		       iommu->name);
		return;
	}

	iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}

static void __flush_svm_range_dev(struct intel_svm *svm,
				  struct intel_svm_dev *sdev,
				  unsigned long address,
				  unsigned long pages, int ih)
{
	struct device_domain_info *info = get_domain_info(sdev->dev);

	if (WARN_ON(!pages))
		return;

	qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
	if (info->ats_enabled)
		qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
					 svm->pasid, sdev->qdep, address,
					 order_base_2(pages));
}
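/*
 * The invalidation descriptors used by __flush_svm_range_dev() address a
 * naturally aligned, power-of-two number of pages. Split an arbitrary
 * range into chunks of the rounded-up size and flush each chunk at its
 * aligned offset.
 */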
static void intel_flush_svm_range_dev(struct intel_svm *svm,
				      struct intel_svm_dev *sdev,
				      unsigned long address,
				      unsigned long pages, int ih)
{
	unsigned long shift = ilog2(__roundup_pow_of_two(pages));
	unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
	unsigned long start = ALIGN_DOWN(address, align);
	unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);

	while (start < end) {
		__flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
		start += align;
	}
}

static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
				  unsigned long pages, int ih)
{
	struct intel_svm_dev *sdev;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
	rcu_read_unlock();
}

/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* This might end up being called from exit_mmap(), *before* the page
	 * tables are cleared. And __mmu_notifier_release() will delete us from
	 * the list of notifiers so that our invalidate_range() callback doesn't
	 * get called when the page tables are cleared. So we need to protect
	 * against hardware accessing those page tables.
	 *
	 * We do it by clearing the entry in the PASID table and then flushing
	 * the IOTLB and the PASID table caches. This might upset hardware;
	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
	 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
					    svm->pasid, true);
	rcu_read_unlock();
}

static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.invalidate_range = intel_invalidate_range,
};

static DEFINE_MUTEX(pasid_mutex);
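/*
 * Resolve a PASID to its intel_svm and, if one exists, the intel_svm_dev
 * bond for @dev. Returns 0 with *rsvm and/or *rsdev set to NULL when no
 * such binding exists; a non-zero return indicates an invalid PASID or a
 * stale private entry.
 */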
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
			     struct intel_svm **rsvm,
			     struct intel_svm_dev **rsdev)
{
	struct intel_svm_dev *sdev = NULL;
	struct intel_svm *svm;

	/* The caller should hold the pasid_mutex lock */
	if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
		return -EINVAL;

	if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
		return -EINVAL;

	svm = pasid_private_find(pasid);
	if (IS_ERR(svm))
		return PTR_ERR(svm);

	if (!svm)
		goto out;

	/*
	 * If we found svm for the PASID, there must be at least one device
	 * bond.
	 */
	if (WARN_ON(list_empty(&svm->devs)))
		return -EINVAL;
	sdev = svm_lookup_device_by_dev(svm, dev);

out:
	*rsvm = svm;
	*rsdev = sdev;

	return 0;
}
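/*
 * Bind a guest PASID to a device: validate the user-provided bind data,
 * then install a nested translation entry in which the guest page table
 * (data->gpgd) is walked as the first level and the host domain provides
 * the second level. Since each device has its own PASID table, this must
 * be repeated for every device that shares the PASID.
 */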
int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
			  struct iommu_gpasid_bind_data *data)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	struct intel_svm_dev *sdev = NULL;
	struct dmar_domain *dmar_domain;
	struct device_domain_info *info;
	struct intel_svm *svm = NULL;
	unsigned long iflags;
	int ret = 0;

	if (WARN_ON(!iommu) || !data)
		return -EINVAL;

	if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
		return -EINVAL;

	/* IOMMU core ensures argsz is more than the start of the union */
	if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
		return -EINVAL;

	/* Make sure no undefined flags are used in vendor data */
	if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
		return -EINVAL;

	if (!dev_is_pci(dev))
		return -ENOTSUPP;

	/* VT-d supports devices with full 20 bit PASIDs only */
	if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
		return -EINVAL;

	/*
	 * We only check the host PASID range; we have no knowledge to
	 * check the guest PASID range.
	 */
	if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
		return -EINVAL;

	info = get_domain_info(dev);
	if (!info)
		return -EINVAL;

	dmar_domain = to_dmar_domain(domain);

	mutex_lock(&pasid_mutex);
	ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
	if (ret)
		goto out;

	if (sdev) {
		/*
		 * Do not allow multiple bindings of the same device-PASID pair
		 * since there is only one set of second-level page tables per
		 * PASID. We may revisit this once sharing a PGD across domains
		 * is supported.
		 */
		dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
				     svm->pasid);
		ret = -EBUSY;
		goto out;
	}

	if (!svm) {
		/* We come here when the PASID has never been bound to a device. */
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			goto out;
		}
		/* REVISIT: upper layer/VFIO can track the host process that binds
		 * the PASID. ioasid_set = mm might be sufficient for vfio to
		 * check pasid VMM ownership. We can drop the following line
		 * once VFIO and IOASID set check is in place.
		 */
		svm->mm = get_task_mm(current);
		svm->pasid = data->hpasid;
		if (data->flags & IOMMU_SVA_GPASID_VAL) {
			svm->gpasid = data->gpasid;
			svm->flags |= SVM_FLAG_GUEST_PASID;
		}
		pasid_private_add(data->hpasid, svm);
		INIT_LIST_HEAD_RCU(&svm->devs);
		mmput(svm->mm);
	}
	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);
	sdev->iommu = iommu;

	/* Only count users if device has aux domains */
	if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
		sdev->users = 1;

	/* Set up device context entry for PASID if not enabled already */
	ret = intel_iommu_enable_pasid(iommu, sdev->dev);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
		kfree(sdev);
		goto out;
	}

	/*
	 * The PASID table is per device for better security. Therefore, for
	 * each bind of a new device even with an existing PASID, we need to
	 * call the nested mode setup function here.
	 */
	spin_lock_irqsave(&iommu->lock, iflags);
	ret = intel_pasid_setup_nested(iommu, dev,
				       (pgd_t *)(uintptr_t)data->gpgd,
				       data->hpasid, &data->vendor.vtd, dmar_domain,
				       data->addr_width);
	spin_unlock_irqrestore(&iommu->lock, iflags);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
				    data->hpasid, ret);
		/*
		 * The PASID entry should be in cleared state if nested mode
		 * setup failed. So we only need to clear the IOASID tracking
		 * data such that the free call will succeed.
		 */
		kfree(sdev);
		goto out;
	}

	svm->flags |= SVM_FLAG_GUEST_MODE;

	init_rcu_head(&sdev->rcu);
	list_add_rcu(&sdev->list, &svm->devs);
out:
	if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
		pasid_private_remove(data->hpasid);
		kfree(svm);
	}

	mutex_unlock(&pasid_mutex);
	return ret;
}
int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	struct intel_svm_dev *sdev;
	struct intel_svm *svm;
	int ret;

	if (WARN_ON(!iommu))
		return -EINVAL;

	mutex_lock(&pasid_mutex);
	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
	if (ret)
		goto out;

	if (sdev) {
		if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
			sdev->users--;
		if (!sdev->users) {
			list_del_rcu(&sdev->list);
			intel_pasid_tear_down_entry(iommu, dev,
						    svm->pasid, false);
			intel_svm_drain_prq(dev, svm->pasid);
			kfree_rcu(sdev, rcu);

			if (list_empty(&svm->devs)) {
				/*
				 * We do not free the IOASID here because the
				 * IOMMU driver did not allocate it. Unlike
				 * native SVM, the IOASID for guest use was
				 * allocated prior to the bind call. In any
				 * case, if the free call comes before the
				 * unbind, the IOMMU driver will get notified
				 * and perform cleanup.
				 */
				pasid_private_remove(pasid);
				kfree(svm);
			}
		}
	}
out:
	mutex_unlock(&pasid_mutex);
	return ret;
}
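/*
 * Propagate a PASID change to every CPU currently running this mm's
 * tasks; update_pasid() performs the per-CPU MSR write (the PASID MSR
 * consumed by ENQCMD-based work submission, as I read it).
 */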
static void _load_pasid(void *unused)
{
	update_pasid();
}

static void load_pasid(struct mm_struct *mm, u32 pasid)
{
	mutex_lock(&mm->context.lock);

	/* Update PASID MSR on all CPUs running the mm's tasks. */
	on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);

	mutex_unlock(&mm->context.lock);
}

static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
				 unsigned int flags)
{
	ioasid_t max_pasid = dev_is_pci(dev) ?
			pci_max_pasids(to_pci_dev(dev)) : intel_pasid_max_id;

	return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1);
}

static void intel_svm_free_pasid(struct mm_struct *mm)
{
	iommu_sva_free_pasid(mm);
}
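/*
 * Native SVM bind: find or create the intel_svm for this mm (registering
 * an mmu_notifier for non-supervisor binds so that CPU TLB invalidations
 * are mirrored to the IOMMU), add a per-device bond, and program a
 * first-level PASID table entry that points at the mm's page tables.
 */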
static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
					   struct device *dev,
					   struct mm_struct *mm,
					   unsigned int flags)
{
	struct device_domain_info *info = get_domain_info(dev);
	unsigned long iflags, sflags;
	struct intel_svm_dev *sdev;
	struct intel_svm *svm;
	int ret = 0;

	svm = pasid_private_find(mm->pasid);
	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm)
			return ERR_PTR(-ENOMEM);

		svm->pasid = mm->pasid;
		svm->mm = mm;
		svm->flags = flags;
		INIT_LIST_HEAD_RCU(&svm->devs);

		if (!(flags & SVM_FLAG_SUPERVISOR_MODE)) {
			svm->notifier.ops = &intel_mmuops;
			ret = mmu_notifier_register(&svm->notifier, mm);
			if (ret) {
				kfree(svm);
				return ERR_PTR(ret);
			}
		}

		ret = pasid_private_add(svm->pasid, svm);
		if (ret) {
			if (svm->notifier.ops)
				mmu_notifier_unregister(&svm->notifier, mm);
			kfree(svm);
			return ERR_PTR(ret);
		}
	}

	/* Find the matching device in svm list */
	sdev = svm_lookup_device_by_dev(svm, dev);
	if (sdev) {
		sdev->users++;
		goto success;
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto free_svm;
	}

	sdev->dev = dev;
	sdev->iommu = iommu;
	sdev->did = FLPT_DEFAULT_DID;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);
	sdev->users = 1;
	sdev->pasid = svm->pasid;
	sdev->sva.dev = dev;
	init_rcu_head(&sdev->rcu);
	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}

	/* Setup the pasid table: */
	sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
			PASID_FLAG_SUPERVISOR_MODE : 0;
	sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
	spin_lock_irqsave(&iommu->lock, iflags);
	ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
					    FLPT_DEFAULT_DID, sflags);
	spin_unlock_irqrestore(&iommu->lock, iflags);

	if (ret)
		goto free_sdev;

	/* The newly allocated pasid is loaded to the mm. */
	if (!(flags & SVM_FLAG_SUPERVISOR_MODE) && list_empty(&svm->devs))
		load_pasid(mm, svm->pasid);

	list_add_rcu(&sdev->list, &svm->devs);
success:
	return &sdev->sva;

free_sdev:
	kfree(sdev);
free_svm:
	if (list_empty(&svm->devs)) {
		if (svm->notifier.ops)
			mmu_notifier_unregister(&svm->notifier, mm);
		pasid_private_remove(mm->pasid);
		kfree(svm);
	}

	return ERR_PTR(ret);
}
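/*
 * Teardown order matters below: the PASID entry is torn down first so
 * the device can no longer fault on this PASID, then any page requests
 * still in flight are drained before the bond and the svm are freed.
 */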
/* Caller must hold pasid_mutex */
static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	struct mm_struct *mm;
	int ret = -EINVAL;

	iommu = device_to_iommu(dev, NULL, NULL);
	if (!iommu)
		goto out;

	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
	if (ret)
		goto out;
	mm = svm->mm;

	if (sdev) {
		sdev->users--;
		if (!sdev->users) {
			list_del_rcu(&sdev->list);
			/* Flush the PASID cache and IOTLB for this device.
			 * Note that we do depend on the hardware *not* using
			 * the PASID any more. Just as we depend on other
			 * devices never using PASIDs that they have no right
			 * to use. We have a *shared* PASID table, because it's
			 * large and has to be physically contiguous. So it's
			 * hard to be as defensive as we might like.
			 */
			intel_pasid_tear_down_entry(iommu, dev,
						    svm->pasid, false);
			intel_svm_drain_prq(dev, svm->pasid);
			kfree_rcu(sdev, rcu);

			if (list_empty(&svm->devs)) {
				if (svm->notifier.ops) {
					mmu_notifier_unregister(&svm->notifier, mm);
					/* Clear mm's pasid. */
					load_pasid(mm, PASID_DISABLED);
				}
				pasid_private_remove(svm->pasid);
				/* We mandate that no page faults may be outstanding
				 * for the PASID when intel_svm_unbind_mm() is called.
				 * If that is not obeyed, subtle errors will happen.
				 * Let's make them less subtle...
				 */
				memset(svm, 0x6b, sizeof(*svm));
				kfree(svm);
			}
		}
		/* Drop a reference to the PASID and free it if none remain. */
		intel_svm_free_pasid(mm);
	}
out:
	return ret;
}
/* Page request queue descriptor */
struct page_req_dsc {
	union {
		struct {
			u64 type:8;
			u64 pasid_present:1;
			u64 priv_data_present:1;
			u64 rsvd:6;
			u64 rid:16;
			u64 pasid:20;
			u64 exe_req:1;
			u64 pm_req:1;
			u64 rsvd2:10;
		};
		u64 qw_0;
	};
	union {
		struct {
			u64 rd_req:1;
			u64 wr_req:1;
			u64 lpig:1;
			u64 prg_index:9;
			u64 addr:52;
		};
		u64 qw_1;
	};
	u64 priv_data[2];
};

static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long) addr;

	return (((saddr << shift) >> shift) == saddr);
}
/**
 * intel_svm_drain_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Drain all pending page requests and responses related to @pasid in both
 * software and hardware. This is supposed to be called after the device
 * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
 * and DevTLB have been invalidated.
 *
 * It waits until all pending page requests for @pasid in the page fault
 * queue are completed by the prq handling thread. Then it follows the
 * steps described in VT-d spec CH7.10 to drain all page requests and page
 * responses pending in the hardware.
 */
static void intel_svm_drain_prq(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct qi_desc desc[3];
	struct pci_dev *pdev;
	int head, tail;
	u16 sid, did;
	int qdep;

	info = get_domain_info(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return;

	if (!info->pri_enabled)
		return;

	iommu = info->iommu;
	domain = info->domain;
	pdev = to_pci_dev(dev);
	sid = PCI_DEVID(info->bus, info->devfn);
	did = domain->iommu_did[iommu->seq_id];
	qdep = pci_ats_queue_depth(pdev);

	/*
	 * Check and wait until all pending page requests in the queue are
	 * handled by the prq handling thread.
	 */
prq_retry:
	reinit_completion(&iommu->prq_complete);
	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req;

		req = &iommu->prq[head / sizeof(*req)];
		if (!req->pasid_present || req->pasid != pasid) {
			head = (head + sizeof(*req)) & PRQ_RING_MASK;
			continue;
		}

		wait_for_completion(&iommu->prq_complete);
		goto prq_retry;
	}

	/*
	 * A work item in the IO page fault workqueue may try to lock
	 * pasid_mutex now. Holding pasid_mutex while waiting in
	 * iopf_queue_flush_dev() for all work items in the workqueue to
	 * finish may cause deadlock.
	 *
	 * It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev().
	 * Unlock it to allow the work items to be handled while waiting
	 * for them to finish.
	 */
	lockdep_assert_held(&pasid_mutex);
	mutex_unlock(&pasid_mutex);
	iopf_queue_flush_dev(dev);
	mutex_lock(&pasid_mutex);
	/*
	 * Perform steps described in VT-d spec CH7.10 to drain page
	 * requests and responses in hardware.
	 */
	memset(desc, 0, sizeof(desc));
	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_FENCE |
			QI_IWD_TYPE;
	desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
			QI_EIOTLB_DID(did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			QI_EIOTLB_TYPE;
	desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
			QI_DEV_EIOTLB_SID(sid) |
			QI_DEV_EIOTLB_QDEP(qdep) |
			QI_DEIOTLB_TYPE |
			QI_DEV_IOTLB_PFSID(info->pfsid);
qi_retry:
	reinit_completion(&iommu->prq_complete);
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		wait_for_completion(&iommu->prq_complete);
		goto qi_retry;
	}
}

static int prq_to_iommu_prot(struct page_req_dsc *req)
{
	int prot = 0;

	if (req->rd_req)
		prot |= IOMMU_FAULT_PERM_READ;
	if (req->wr_req)
		prot |= IOMMU_FAULT_PERM_WRITE;
	if (req->exe_req)
		prot |= IOMMU_FAULT_PERM_EXEC;
	if (req->pm_req)
		prot |= IOMMU_FAULT_PERM_PRIV;

	return prot;
}

static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
				struct page_req_dsc *desc)
{
	struct iommu_fault_event event;

	if (!dev || !dev_is_pci(dev))
		return -ENODEV;

	/* Fill in event data for device specific processing */
	memset(&event, 0, sizeof(struct iommu_fault_event));
	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
	event.fault.prm.pasid = desc->pasid;
	event.fault.prm.grpid = desc->prg_index;
	event.fault.prm.perm = prq_to_iommu_prot(desc);

	if (desc->lpig)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (desc->pasid_present) {
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	}
	if (desc->priv_data_present) {
		/*
		 * Set the last-page-in-group bit if private data is present:
		 * a page response is then required, just as it is for LPIG.
		 * iommu_report_device_fault() doesn't understand this vendor
		 * specific requirement, so we set last_page as a workaround.
		 */
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
		event.fault.prm.private_data[0] = desc->priv_data[0];
		event.fault.prm.private_data[1] = desc->priv_data[1];
	} else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) {
		/*
		 * If the private data fields are not used by hardware, use
		 * them to monitor the prq handling latency.
		 */
		event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
	}

	return iommu_report_device_fault(dev, &event);
}
static void handle_bad_prq_event(struct intel_iommu *iommu,
				 struct page_req_dsc *req, int result)
{
	struct qi_desc desc;

	pr_err("%s: Invalid page request: %08llx %08llx\n",
	       iommu->name, ((unsigned long long *)req)[0],
	       ((unsigned long long *)req)[1]);

	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must
	 * respond with page group response if private data
	 * is present (PDP) or last page in group (LPIG) bit
	 * is set. This is an additional VT-d feature beyond
	 * PCI ATS spec.
	 */
	if (!req->lpig && !req->priv_data_present)
		return;

	desc.qw0 = QI_PGRP_PASID(req->pasid) |
			QI_PGRP_DID(req->rid) |
			QI_PGRP_PASID_P(req->pasid_present) |
			QI_PGRP_PDP(req->priv_data_present) |
			QI_PGRP_RESP_CODE(result) |
			QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(req->prg_index) |
			QI_PGRP_LPIG(req->lpig);

	if (req->priv_data_present) {
		desc.qw2 = req->priv_data[0];
		desc.qw3 = req->priv_data[1];
	} else {
		desc.qw2 = 0;
		desc.qw3 = 0;
	}

	qi_submit_sync(iommu, &desc, 1, 0);
}
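/*
 * Threaded handler for the page request interrupt. Walk the queue from
 * head to tail, sanity-check each descriptor (PASID present, canonical
 * address, no privileged or execute-and-read requests), look up the
 * bound svm and device, and report the fault through the IOMMU fault
 * framework. Bad descriptors get an immediate invalid response.
 */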
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_svm_dev *sdev = NULL;
	struct intel_iommu *iommu = d;
	struct intel_svm *svm = NULL;
	struct page_req_dsc *req;
	int head, tail, handled;
	u64 address;

	/*
	 * Clear PPR bit before reading head/tail registers, to ensure that
	 * we get a new interrupt if needed.
	 */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	handled = (head != tail);
	while (head != tail) {
		req = &iommu->prq[head / sizeof(*req)];
		address = (u64)req->addr << VTD_PAGE_SHIFT;

		if (unlikely(!req->pasid_present)) {
			pr_err("IOMMU: %s: Page request without PASID\n",
			       iommu->name);
bad_req:
			svm = NULL;
			sdev = NULL;
			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
			goto prq_advance;
		}

		if (unlikely(!is_canonical_address(address))) {
			pr_err("IOMMU: %s: Address is not canonical\n",
			       iommu->name);
			goto bad_req;
		}

		if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
			pr_err("IOMMU: %s: Page request in Privilege Mode\n",
			       iommu->name);
			goto bad_req;
		}

		if (unlikely(req->exe_req && req->rd_req)) {
			pr_err("IOMMU: %s: Execution request not supported\n",
			       iommu->name);
			goto bad_req;
		}

		if (!svm || svm->pasid != req->pasid) {
			/*
			 * It can't go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 */
			svm = pasid_private_find(req->pasid);
			if (IS_ERR_OR_NULL(svm) || (svm->flags & SVM_FLAG_SUPERVISOR_MODE))
				goto bad_req;
		}

		if (!sdev || sdev->sid != req->rid) {
			sdev = svm_lookup_device_by_sid(svm, req->rid);
			if (!sdev)
				goto bad_req;
		}

		sdev->prq_seq_number++;

		/*
		 * If the prq is to be handled outside the iommu driver by the
		 * receiver of the fault notification, skip the page response
		 * here.
		 */
		if (intel_svm_prq_report(iommu, sdev->dev, req))
			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);

		trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1,
				 req->priv_data[0], req->priv_data[1],
				 sdev->prq_seq_number);
prq_advance:
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
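/*
 * Entry point for iommu_sva_bind_device(). SVM_FLAG_SUPERVISOR_MODE
 * requests a kernel PASID backed by init_mm and requires supervisor
 * request support (ecap SRS); otherwise the caller's mm is bound. A
 * PASID is allocated for the mm first and released again if the bind
 * fails.
 */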
	/*
	 * Clear the page request overflow bit and wake up all threads that
	 * are waiting for the completion of this handling.
	 */
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
				    iommu->name);
		head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
		tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
		if (head == tail) {
			iopf_queue_discard_partial(iommu->iopf_queue);
			writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
			pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared\n",
					    iommu->name);
		}
	}

	if (!completion_done(&iommu->prq_complete))
		complete(&iommu->prq_complete);

	return IRQ_RETVAL(handled);
}

struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	unsigned int flags = 0;
	struct iommu_sva *sva;
	int ret;

	if (drvdata)
		flags = *(unsigned int *)drvdata;

	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap)) {
			dev_err(dev, "%s: Supervisor PASID not supported\n",
				iommu->name);
			return ERR_PTR(-EOPNOTSUPP);
		}

		if (mm) {
			dev_err(dev, "%s: Supervisor PASID with user provided mm\n",
				iommu->name);
			return ERR_PTR(-EINVAL);
		}

		mm = &init_mm;
	}

	mutex_lock(&pasid_mutex);
	ret = intel_svm_alloc_pasid(dev, mm, flags);
	if (ret) {
		mutex_unlock(&pasid_mutex);
		return ERR_PTR(ret);
	}

	sva = intel_svm_bind_mm(iommu, dev, mm, flags);
	if (IS_ERR_OR_NULL(sva))
		intel_svm_free_pasid(mm);
	mutex_unlock(&pasid_mutex);

	return sva;
}

void intel_svm_unbind(struct iommu_sva *sva)
{
	struct intel_svm_dev *sdev = to_intel_svm_dev(sva);

	mutex_lock(&pasid_mutex);
	intel_svm_unbind_mm(sdev->dev, sdev->pasid);
	mutex_unlock(&pasid_mutex);
}

u32 intel_svm_get_pasid(struct iommu_sva *sva)
{
	struct intel_svm_dev *sdev;
	u32 pasid;

	mutex_lock(&pasid_mutex);
	sdev = to_intel_svm_dev(sva);
	pasid = sdev->pasid;
	mutex_unlock(&pasid_mutex);

	return pasid;
}
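/*
 * Complete a page request on behalf of the fault handler; for guest-mode
 * PASIDs this is typically a VMM relaying the guest's response. Validate
 * the original fault event, verify for guest-mode bonds that the
 * responding process's mm matches the one bound to the PASID, and inject
 * a page group response if the request carried LPIG or private data.
 */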
int intel_svm_page_response(struct device *dev,
			    struct iommu_fault_event *evt,
			    struct iommu_page_response *msg)
{
	struct iommu_fault_page_request *prm;
	struct intel_svm_dev *sdev = NULL;
	struct intel_svm *svm = NULL;
	struct intel_iommu *iommu;
	bool private_present;
	bool pasid_present;
	bool last_page;
	u8 bus, devfn;
	int ret = 0;
	u16 sid;

	if (!dev || !dev_is_pci(dev))
		return -ENODEV;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!msg || !evt)
		return -EINVAL;

	mutex_lock(&pasid_mutex);

	prm = &evt->fault.prm;
	sid = PCI_DEVID(bus, devfn);
	pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
	last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

	if (!pasid_present) {
		ret = -EINVAL;
		goto out;
	}

	if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
		ret = -EINVAL;
		goto out;
	}

	ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
	if (ret || !sdev) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * For responses from userspace, we need to make sure that the
	 * pasid has been bound to its mm.
	 */
	if (svm->flags & SVM_FLAG_GUEST_MODE) {
		struct mm_struct *mm;

		mm = get_task_mm(current);
		if (!mm) {
			ret = -EINVAL;
			goto out;
		}

		if (mm != svm->mm) {
			ret = -ENODEV;
			mmput(mm);
			goto out;
		}

		mmput(mm);
	}

	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must respond
	 * with page group response if private data is present (PDP)
	 * or last page in group (LPIG) bit is set. This is an
	 * additional VT-d requirement beyond PCI ATS spec.
	 */
	if (last_page || private_present) {
		struct qi_desc desc;

		desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
				QI_PGRP_PASID_P(pasid_present) |
				QI_PGRP_PDP(private_present) |
				QI_PGRP_RESP_CODE(msg->code) |
				QI_PGRP_RESP_TYPE;
		desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
		desc.qw2 = 0;
		desc.qw3 = 0;

		if (private_present) {
			desc.qw2 = prm->private_data[0];
			desc.qw3 = prm->private_data[1];
		} else if (prm->private_data[0]) {
			dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
				ktime_to_ns(ktime_get()) - prm->private_data[0]);
		}

		qi_submit_sync(iommu, &desc, 1, 0);
	}
out:
	mutex_unlock(&pasid_mutex);
	return ret;
}