Lines Matching full:iommu
22 #include "iommu.h"
25 #include "../iommu-pages.h"
30 int intel_svm_enable_prq(struct intel_iommu *iommu) in intel_svm_enable_prq() argument
35 iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER); in intel_svm_enable_prq()
36 if (!iommu->prq) { in intel_svm_enable_prq()
37 pr_warn("IOMMU: %s: Failed to allocate page request queue\n", in intel_svm_enable_prq()
38 iommu->name); in intel_svm_enable_prq()
42 irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
44 pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n", in intel_svm_enable_prq()
45 iommu->name); in intel_svm_enable_prq()
49 iommu->pr_irq = irq; in intel_svm_enable_prq()
51 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), in intel_svm_enable_prq()
52 "dmar%d-iopfq", iommu->seq_id); in intel_svm_enable_prq()
53 iopfq = iopf_queue_alloc(iommu->iopfq_name); in intel_svm_enable_prq()
55 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); in intel_svm_enable_prq()
59 iommu->iopf_queue = iopfq; in intel_svm_enable_prq()
61 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
64 iommu->prq_name, iommu); in intel_svm_enable_prq()
66 pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", in intel_svm_enable_prq()
67 iommu->name); in intel_svm_enable_prq()
70 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
71 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
72 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
74 init_completion(&iommu->prq_complete); in intel_svm_enable_prq()
79 iopf_queue_free(iommu->iopf_queue); in intel_svm_enable_prq()
80 iommu->iopf_queue = NULL; in intel_svm_enable_prq()
83 iommu->pr_irq = 0; in intel_svm_enable_prq()
85 iommu_free_pages(iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
86 iommu->prq = NULL; in intel_svm_enable_prq()
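
Taken together, the intel_svm_enable_prq() matches above describe the page request queue (PRQ) bring-up: allocate the queue pages, allocate and request a threaded hardware IRQ, create the I/O page fault (iopf) queue, program the queue registers, and unwind in reverse order on failure. The sketch below is a minimal reconstruction of that flow; only the quoted lines come from the listing, while the branch conditions, error codes and unwind labels are filled in as assumptions.

    int intel_svm_enable_prq(struct intel_iommu *iommu)
    {
        struct iopf_queue *iopfq;
        int irq, ret;

        /* Backing pages for the hardware page request queue. */
        iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
        if (!iommu->prq)
            return -ENOMEM;

        /* One PRQ interrupt vector per DMAR unit. */
        irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id,
                               iommu->node, iommu);
        if (irq <= 0) {
            ret = -EINVAL;
            goto free_prq;
        }
        iommu->pr_irq = irq;

        /* iopf queue that feeds faults to the generic IOMMU fault layer. */
        snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
                 "dmar%d-iopfq", iommu->seq_id);
        iopfq = iopf_queue_alloc(iommu->iopfq_name);
        if (!iopfq) {
            ret = -ENOMEM;
            goto free_hwirq;
        }
        iommu->iopf_queue = iopfq;

        snprintf(iommu->prq_name, sizeof(iommu->prq_name),
                 "dmar%d-prq", iommu->seq_id);
        ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
                                   iommu->prq_name, iommu);
        if (ret)
            goto free_iopfq;

        /* Point the hardware at an empty queue of 2^PRQ_ORDER pages. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG,
                    virt_to_phys(iommu->prq) | PRQ_ORDER);

        init_completion(&iommu->prq_complete);
        return 0;

    free_iopfq:
        iopf_queue_free(iommu->iopf_queue);
        iommu->iopf_queue = NULL;
    free_hwirq:
        dmar_free_hwirq(irq);
        iommu->pr_irq = 0;
    free_prq:
        iommu_free_pages(iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;
        return ret;
    }

DMAR_PQA_REG takes the physical base address of the queue with its size, expressed as a page order, encoded in the low bits, which is why PRQ_ORDER can simply be ORed into the address.
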
91 int intel_svm_finish_prq(struct intel_iommu *iommu) in intel_svm_finish_prq() argument
93 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
94 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
95 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
97 if (iommu->pr_irq) { in intel_svm_finish_prq()
98 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
99 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
100 iommu->pr_irq = 0; in intel_svm_finish_prq()
103 if (iommu->iopf_queue) { in intel_svm_finish_prq()
104 iopf_queue_free(iommu->iopf_queue); in intel_svm_finish_prq()
105 iommu->iopf_queue = NULL; in intel_svm_finish_prq()
108 iommu_free_pages(iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
109 iommu->prq = NULL; in intel_svm_finish_prq()
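
The intel_svm_finish_prq() matches are the teardown mirror of the setup above: quiesce the hardware queue pointers first, then release the IRQ, the iopf queue and the PRQ pages. A condensed sketch, assuming the same field names:

    int intel_svm_finish_prq(struct intel_iommu *iommu)
    {
        /* Detach the hardware from the queue before freeing anything. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

        if (iommu->pr_irq) {
            free_irq(iommu->pr_irq, iommu);
            dmar_free_hwirq(iommu->pr_irq);
            iommu->pr_irq = 0;
        }

        if (iommu->iopf_queue) {
            iopf_queue_free(iommu->iopf_queue);
            iommu->iopf_queue = NULL;
        }

        iommu_free_pages(iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;

        return 0;
    }
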
114 void intel_svm_check(struct intel_iommu *iommu) in intel_svm_check() argument
116 if (!pasid_supported(iommu)) in intel_svm_check()
120 !cap_fl1gp_support(iommu->cap)) { in intel_svm_check()
122 iommu->name); in intel_svm_check()
127 !cap_fl5lp_support(iommu->cap)) { in intel_svm_check()
129 iommu->name); in intel_svm_check()
133 iommu->flags |= VTD_FLAG_SVM_CAPABLE; in intel_svm_check()
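
intel_svm_check() gates SVM support on PASID capability and on first-level paging features that must match the CPU: 1GB pages (cap_fl1gp_support) and 5-level paging (cap_fl5lp_support). A sketch of that gating; the CPU feature checks and message text are reconstructed around the quoted lines and should be read as assumptions:

    void intel_svm_check(struct intel_iommu *iommu)
    {
        if (!pasid_supported(iommu))
            return;

        /* The CPU may hand the IOMMU 1GB first-level mappings. */
        if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
            !cap_fl1gp_support(iommu->cap)) {
            pr_err("%s SVM disabled, incompatible 1GB page capability\n",
                   iommu->name);
            return;
        }

        /* The CPU page tables may be 5-level (LA57). */
        if (cpu_feature_enabled(X86_FEATURE_LA57) &&
            !cap_fl5lp_support(iommu->cap)) {
            pr_err("%s SVM disabled, incompatible paging mode\n",
                   iommu->name);
            return;
        }

        iommu->flags |= VTD_FLAG_SVM_CAPABLE;
    }
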
150 * different from IOMMU subsystem using the last address of an address in intel_arch_invalidate_secondary_tlbs()
178 intel_pasid_tear_down_entry(info->iommu, dev_pasid->dev, in intel_mm_release()
204 struct intel_iommu *iommu = info->iommu; in intel_svm_set_dev_pasid() local
224 ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid, in intel_svm_set_dev_pasid()
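
intel_svm_set_dev_pasid() binds a process address space to a device PASID by installing the process page table root (mm->pgd) into a first-level PASID entry. A minimal fragment of just that call; the domain ID (FLPT_DEFAULT_DID, the same fallback seen in the drain path below) and the 5-level-paging flag are assumptions here:

        unsigned long sflags;

        /* Mirror the CPU's paging mode in the first-level PASID entry. */
        sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
        ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
                                            FLPT_DEFAULT_DID, sflags);
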
299 struct intel_iommu *iommu; in intel_drain_pasid_prq() local
313 iommu = info->iommu; in intel_drain_pasid_prq()
317 did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID; in intel_drain_pasid_prq()
325 reinit_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
326 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_drain_pasid_prq()
327 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_drain_pasid_prq()
331 req = &iommu->prq[head / sizeof(*req)]; in intel_drain_pasid_prq()
337 wait_for_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
361 reinit_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
362 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); in intel_drain_pasid_prq()
363 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_drain_pasid_prq()
364 wait_for_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
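
The intel_drain_pasid_prq() matches outline the two-phase drain procedure from the VT-d specification: first wait until the hardware head pointer has passed every queued request for the PASID being torn down (prq_event_thread() signals prq_complete each time it finishes a scan), then submit a fenced three-descriptor batch with QI_OPT_WAIT_DRAIN and retry while the Page Request Overflow bit (DMA_PRS_PRO) is still set. The retry skeleton is sketched below; the contents of the three QI descriptors are omitted because they are not part of the matched lines.

    prq_retry:
        reinit_completion(&iommu->prq_complete);
        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
            req = &iommu->prq[head / sizeof(*req)];
            if (req->pasid_present && req->pasid == pasid) {
                /* Still pending in hardware; wait for the next scan. */
                wait_for_completion(&iommu->prq_complete);
                goto prq_retry;
            }
            head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

    qi_retry:
        reinit_completion(&iommu->prq_complete);
        qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
            /* The queue overflowed while draining; wait and drain again. */
            wait_for_completion(&iommu->prq_complete);
            goto qi_retry;
        }
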
385 static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, in intel_svm_prq_report() argument
407 static void handle_bad_prq_event(struct intel_iommu *iommu, in handle_bad_prq_event() argument
413 iommu->name, ((unsigned long long *)req)[0], in handle_bad_prq_event()
427 qi_submit_sync(iommu, &desc, 1, 0); in handle_bad_prq_event()
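
handle_bad_prq_event() logs the raw request descriptor and, when the faulting request expects a reply (last page in group), answers it directly with a page group response carrying the given result code (QI_RESP_INVALID at the call sites above). A hedged sketch of that path; the response descriptor field macros follow the queued-invalidation page group response layout and are assumptions here:

    static void handle_bad_prq_event(struct intel_iommu *iommu,
                                     struct page_req_dsc *req, int result)
    {
        struct qi_desc desc = {};

        pr_err("%s: Invalid page request: %08llx %08llx\n",
               iommu->name, ((unsigned long long *)req)[0],
               ((unsigned long long *)req)[1]);

        /* Only requests that ask for a response get one. */
        if (!req->lpig)
            return;

        desc.qw0 = QI_PGRP_PASID(req->pasid) |
                   QI_PGRP_DID(req->rid) |
                   QI_PGRP_PASID_P(req->pasid_present) |
                   QI_PGRP_RESP_CODE(result) |
                   QI_PGRP_RESP_TYPE;
        desc.qw1 = QI_PGRP_IDX(req->prg_index) |
                   QI_PGRP_LPIG(req->lpig);

        qi_submit_sync(iommu, &desc, 1, 0);
    }
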
432 struct intel_iommu *iommu = d; in prq_event_thread() local
442 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
444 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
445 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
448 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
452 pr_err("IOMMU: %s: Page request without PASID\n", in prq_event_thread()
453 iommu->name); in prq_event_thread()
455 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
460 pr_err("IOMMU: %s: Address is not canonical\n", in prq_event_thread()
461 iommu->name); in prq_event_thread()
466 pr_err("IOMMU: %s: Page request in Privilege Mode\n", in prq_event_thread()
467 iommu->name); in prq_event_thread()
472 pr_err("IOMMU: %s: Execution request not supported\n", in prq_event_thread()
473 iommu->name); in prq_event_thread()
482 * If prq is to be handled outside iommu driver via receiver of in prq_event_thread()
485 mutex_lock(&iommu->iopf_lock); in prq_event_thread()
486 dev = device_rbtree_find(iommu, req->rid); in prq_event_thread()
488 mutex_unlock(&iommu->iopf_lock); in prq_event_thread()
492 intel_svm_prq_report(iommu, dev, req); in prq_event_thread()
493 trace_prq_report(iommu, dev, req->qw_0, req->qw_1, in prq_event_thread()
495 iommu->prq_seq_number++); in prq_event_thread()
496 mutex_unlock(&iommu->iopf_lock); in prq_event_thread()
501 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
507 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
508 pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n", in prq_event_thread()
509 iommu->name); in prq_event_thread()
510 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
511 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
513 iopf_queue_discard_partial(iommu->iopf_queue); in prq_event_thread()
514 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
515 pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared\n", in prq_event_thread()
516 iommu->name); in prq_event_thread()
520 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
521 complete(&iommu->prq_complete); in prq_event_thread()
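
prq_event_thread() is the threaded handler registered by intel_svm_enable_prq(). The matches trace its main loop: acknowledge the pending-page-request status (DMA_PRS_PPR), walk the ring from head to tail, reject malformed requests, otherwise resolve the requester ID to a device and hand the fault to the reporting path, then advance PQH and clear an overflow (DMA_PRS_PRO) once the queue is empty. A condensed sketch of that control flow; is_valid_request() is a hypothetical helper standing in for the PASID/canonical-address/privilege/execute checks quoted above:

    static irqreturn_t prq_event_thread(int irq, void *d)
    {
        struct intel_iommu *iommu = d;
        struct page_req_dsc *req;
        struct device *dev;
        u64 head, tail;

        /* Ack "pending page request" so a new interrupt can be raised. */
        writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;

        while (head != tail) {
            req = &iommu->prq[head / sizeof(*req)];

            /* Hypothetical helper: stands in for the validity checks above. */
            if (!is_valid_request(req)) {
                handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
                goto prq_advance;
            }

            /* Resolve the requester ID and report the fault; the iopf
             * layer replies asynchronously via intel_svm_page_response(). */
            mutex_lock(&iommu->iopf_lock);
            dev = device_rbtree_find(iommu, req->rid);
            if (!dev) {
                mutex_unlock(&iommu->iopf_lock);
                handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
                goto prq_advance;
            }
            intel_svm_prq_report(iommu, dev, req);
            trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
                             req->qw_2, req->qw_3,
                             iommu->prq_seq_number++);
            mutex_unlock(&iommu->iopf_lock);
    prq_advance:
            head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

        /* Everything up to the observed tail has been consumed. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

        /* Clear a pending overflow only once the queue is fully drained,
         * discarding any partially received fault groups. */
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
            head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
            tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
            if (head == tail) {
                iopf_queue_discard_partial(iommu->iopf_queue);
                writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
            }
        }

        if (!completion_done(&iommu->prq_complete))
            complete(&iommu->prq_complete);

        return IRQ_HANDLED;
    }
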
530 struct intel_iommu *iommu = info->iommu; in intel_svm_page_response() local
551 qi_submit_sync(iommu, &desc, 1, 0); in intel_svm_page_response()
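
intel_svm_page_response() is the return path: once a fault reported through intel_svm_prq_report() has been serviced, the response code is packed back into a single page group response descriptor and submitted on the invalidation queue. A hedged sketch, assuming the generic iommu_page_response/iommu_fault_page_request field names for the incoming message:

    void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
                                 struct iommu_page_response *msg)
    {
        struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct intel_iommu *iommu = info->iommu;
        struct iommu_fault_page_request *prm = &evt->fault.prm;
        u16 sid = PCI_DEVID(info->bus, info->devfn);
        bool pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
        bool last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
        struct qi_desc desc;

        /* Echo the request's identifiers back with the resolution code. */
        desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
                   QI_PGRP_PASID_P(pasid_present) |
                   QI_PGRP_RESP_CODE(msg->code) |
                   QI_PGRP_RESP_TYPE;
        desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
        desc.qw2 = 0;
        desc.qw3 = 0;

        qi_submit_sync(iommu, &desc, 1, 0);
    }
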