Lines Matching +full:stream +full:- +full:match +full:- +full:mask

1 // SPDX-License-Identifier: GPL-2.0-only
10 * - SMMUv1 and v2 implementations
11 * - Stream-matching and stream-indexing
12 * - v7/v8 long-descriptor format
13 * - Non-secure access to the SMMU
14 * - Context fault reporting
15 * - Extended Stream ID (16 bit)
18 #define pr_fmt(fmt) "arm-smmu: " fmt
24 #include <linux/dma-mapping.h>
41 #include "arm-smmu.h"
42 #include "../../dma-iommu.h"
51 #define QCOM_DUMMY_VAL -1
74 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_get()
75 return pm_runtime_resume_and_get(smmu->dev); in arm_smmu_rpm_get()
82 if (pm_runtime_enabled(smmu->dev)) { in arm_smmu_rpm_put()
83 pm_runtime_mark_last_busy(smmu->dev); in arm_smmu_rpm_put()
84 __pm_runtime_put_autosuspend(smmu->dev); in arm_smmu_rpm_put()
99 * to 5-10sec worth of reprogramming the context bank, while in arm_smmu_rpm_use_autosuspend()
102 pm_runtime_set_autosuspend_delay(smmu->dev, 20); in arm_smmu_rpm_use_autosuspend()
103 pm_runtime_use_autosuspend(smmu->dev); in arm_smmu_rpm_use_autosuspend()
118 struct pci_bus *bus = to_pci_dev(dev)->bus; in dev_get_dev_node()
121 bus = bus->parent; in dev_get_dev_node()
122 return of_node_get(bus->bridge->parent->of_node); in dev_get_dev_node()
125 return of_node_get(dev->of_node); in dev_get_dev_node()
137 struct device_node *np = it->node; in __find_legacy_master_phandle()
140 of_for_each_phandle(it, err, dev->of_node, "mmu-masters", in __find_legacy_master_phandle()
141 "#stream-id-cells", -1) in __find_legacy_master_phandle()
142 if (it->node == np) { in __find_legacy_master_phandle()
146 it->node = np; in __find_legacy_master_phandle()
147 return err == -ENOENT ? 0 : err; in __find_legacy_master_phandle()
162 if (!np || !of_property_present(np, "#stream-id-cells")) { in arm_smmu_register_legacy_master()
164 return -ENODEV; in arm_smmu_register_legacy_master()
173 return -ENODEV; in arm_smmu_register_legacy_master()
178 /* "mmu-masters" assumes Stream ID == Requester ID */ in arm_smmu_register_legacy_master()
191 return -ENOMEM; in arm_smmu_register_legacy_master()
203 return -ENODEV; in arm_smmu_register_legacy_master()
219 if (smmu->impl && unlikely(smmu->impl->tlb_sync)) in __arm_smmu_tlb_sync()
220 return smmu->impl->tlb_sync(smmu, page, sync, status); in __arm_smmu_tlb_sync()
224 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { in __arm_smmu_tlb_sync()
232 dev_err_ratelimited(smmu->dev, in __arm_smmu_tlb_sync()
233 "TLB sync timed out -- SMMU may be deadlocked\n"); in __arm_smmu_tlb_sync()
240 spin_lock_irqsave(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
243 spin_unlock_irqrestore(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
248 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_sync_context()
251 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
252 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx), in arm_smmu_tlb_sync_context()
254 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
265 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx, in arm_smmu_tlb_inv_context_s1()
266 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid); in arm_smmu_tlb_inv_context_s1()
273 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context_s2()
277 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_inv_context_s2()
285 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s1()
286 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_range_s1()
287 int idx = cfg->cbndx; in arm_smmu_tlb_inv_range_s1()
289 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_s1()
292 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_tlb_inv_range_s1()
294 iova |= cfg->asid; in arm_smmu_tlb_inv_range_s1()
298 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
301 iova |= (u64)cfg->asid << 48; in arm_smmu_tlb_inv_range_s1()
305 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
313 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s2()
314 int idx = smmu_domain->cfg.cbndx; in arm_smmu_tlb_inv_range_s2()
316 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_s2()
321 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_tlb_inv_range_s2()
326 } while (size -= granule); in arm_smmu_tlb_inv_range_s2()
333 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_walk_s1()
335 if (cfg->flush_walk_prefer_tlbiasid) { in arm_smmu_tlb_inv_walk_s1()
374 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
377 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
385 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_add_page_s2_v1()
387 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_add_page_s2_v1()
390 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_add_page_s2_v1()
415 cfi->iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR); in arm_smmu_read_context_fault_info()
416 cfi->fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); in arm_smmu_read_context_fault_info()
417 cfi->fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0); in arm_smmu_read_context_fault_info()
418 cfi->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx)); in arm_smmu_read_context_fault_info()
424 dev_err(smmu->dev, in arm_smmu_print_context_fault_info()
426 cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx); in arm_smmu_print_context_fault_info()
428 dev_err(smmu->dev, "FSR = %08x [%s%sFormat=%u%s%s%s%s%s%s%s%s], SID=0x%x\n", in arm_smmu_print_context_fault_info()
429 cfi->fsr, in arm_smmu_print_context_fault_info()
430 (cfi->fsr & ARM_SMMU_CB_FSR_MULTI) ? "MULTI " : "", in arm_smmu_print_context_fault_info()
431 (cfi->fsr & ARM_SMMU_CB_FSR_SS) ? "SS " : "", in arm_smmu_print_context_fault_info()
432 (u32)FIELD_GET(ARM_SMMU_CB_FSR_FORMAT, cfi->fsr), in arm_smmu_print_context_fault_info()
433 (cfi->fsr & ARM_SMMU_CB_FSR_UUT) ? " UUT" : "", in arm_smmu_print_context_fault_info()
434 (cfi->fsr & ARM_SMMU_CB_FSR_ASF) ? " ASF" : "", in arm_smmu_print_context_fault_info()
435 (cfi->fsr & ARM_SMMU_CB_FSR_TLBLKF) ? " TLBLKF" : "", in arm_smmu_print_context_fault_info()
436 (cfi->fsr & ARM_SMMU_CB_FSR_TLBMCF) ? " TLBMCF" : "", in arm_smmu_print_context_fault_info()
437 (cfi->fsr & ARM_SMMU_CB_FSR_EF) ? " EF" : "", in arm_smmu_print_context_fault_info()
438 (cfi->fsr & ARM_SMMU_CB_FSR_PF) ? " PF" : "", in arm_smmu_print_context_fault_info()
439 (cfi->fsr & ARM_SMMU_CB_FSR_AFF) ? " AFF" : "", in arm_smmu_print_context_fault_info()
440 (cfi->fsr & ARM_SMMU_CB_FSR_TF) ? " TF" : "", in arm_smmu_print_context_fault_info()
441 cfi->cbfrsynra); in arm_smmu_print_context_fault_info()
443 dev_err(smmu->dev, "FSYNR0 = %08x [S1CBNDX=%u%s%s%s%s%s%s PLVL=%u]\n", in arm_smmu_print_context_fault_info()
444 cfi->fsynr, in arm_smmu_print_context_fault_info()
445 (u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_S1CBNDX, cfi->fsynr), in arm_smmu_print_context_fault_info()
446 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_AFR) ? " AFR" : "", in arm_smmu_print_context_fault_info()
447 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_PTWF) ? " PTWF" : "", in arm_smmu_print_context_fault_info()
448 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_NSATTR) ? " NSATTR" : "", in arm_smmu_print_context_fault_info()
449 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_IND) ? " IND" : "", in arm_smmu_print_context_fault_info()
450 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_PNU) ? " PNU" : "", in arm_smmu_print_context_fault_info()
451 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_WNR) ? " WNR" : "", in arm_smmu_print_context_fault_info()
452 (u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_PLVL, cfi->fsynr)); in arm_smmu_print_context_fault_info()
459 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_context_fault()
462 int idx = smmu_domain->cfg.cbndx; in arm_smmu_context_fault()
470 ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova, in arm_smmu_context_fault()
473 if (ret == -ENOSYS && __ratelimit(&rs)) in arm_smmu_context_fault()
480 ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE); in arm_smmu_context_fault()
504 dev_err(smmu->dev, in arm_smmu_global_fault()
505 …"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may h… in arm_smmu_global_fault()
508 dev_err(smmu->dev, in arm_smmu_global_fault()
510 dev_err(smmu->dev, in arm_smmu_global_fault()
522 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_context_bank()
523 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx]; in arm_smmu_init_context_bank()
524 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_init_context_bank()
526 cb->cfg = cfg; in arm_smmu_init_context_bank()
530 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
531 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr; in arm_smmu_init_context_bank()
533 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg); in arm_smmu_init_context_bank()
534 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg); in arm_smmu_init_context_bank()
535 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_init_context_bank()
536 cb->tcr[1] |= ARM_SMMU_TCR2_AS; in arm_smmu_init_context_bank()
538 cb->tcr[0] |= ARM_SMMU_TCR_EAE; in arm_smmu_init_context_bank()
541 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg); in arm_smmu_init_context_bank()
546 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
547 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr; in arm_smmu_init_context_bank()
548 cb->ttbr[1] = 0; in arm_smmu_init_context_bank()
550 cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, in arm_smmu_init_context_bank()
551 cfg->asid); in arm_smmu_init_context_bank()
552 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, in arm_smmu_init_context_bank()
553 cfg->asid); in arm_smmu_init_context_bank()
555 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) in arm_smmu_init_context_bank()
556 cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr; in arm_smmu_init_context_bank()
558 cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr; in arm_smmu_init_context_bank()
561 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; in arm_smmu_init_context_bank()
564 /* MAIRs (stage-1 only) */ in arm_smmu_init_context_bank()
566 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
567 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr; in arm_smmu_init_context_bank()
568 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr; in arm_smmu_init_context_bank()
570 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair; in arm_smmu_init_context_bank()
571 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32; in arm_smmu_init_context_bank()
580 struct arm_smmu_cb *cb = &smmu->cbs[idx]; in arm_smmu_write_context_bank()
581 struct arm_smmu_cfg *cfg = cb->cfg; in arm_smmu_write_context_bank()
589 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_write_context_bank()
592 if (smmu->version > ARM_SMMU_V1) { in arm_smmu_write_context_bank()
593 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_write_context_bank()
597 /* 16-bit VMIDs live in CBA2R */ in arm_smmu_write_context_bank()
598 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_write_context_bank()
599 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid); in arm_smmu_write_context_bank()
605 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar); in arm_smmu_write_context_bank()
606 if (smmu->version < ARM_SMMU_V2) in arm_smmu_write_context_bank()
607 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx); in arm_smmu_write_context_bank()
618 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { in arm_smmu_write_context_bank()
619 /* 8-bit VMIDs live in CBAR */ in arm_smmu_write_context_bank()
620 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid); in arm_smmu_write_context_bank()
629 if (stage1 && smmu->version > ARM_SMMU_V1) in arm_smmu_write_context_bank()
630 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]); in arm_smmu_write_context_bank()
631 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]); in arm_smmu_write_context_bank()
634 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_write_context_bank()
635 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid); in arm_smmu_write_context_bank()
636 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); in arm_smmu_write_context_bank()
637 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]); in arm_smmu_write_context_bank()
639 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); in arm_smmu_write_context_bank()
642 cb->ttbr[1]); in arm_smmu_write_context_bank()
645 /* MAIRs (stage-1 only) */ in arm_smmu_write_context_bank()
647 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]); in arm_smmu_write_context_bank()
648 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]); in arm_smmu_write_context_bank()
659 if (smmu->impl && smmu->impl->write_sctlr) in arm_smmu_write_context_bank()
660 smmu->impl->write_sctlr(smmu, idx, reg); in arm_smmu_write_context_bank()
669 if (smmu->impl && smmu->impl->alloc_context_bank) in arm_smmu_alloc_context_bank()
670 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start); in arm_smmu_alloc_context_bank()
672 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks); in arm_smmu_alloc_context_bank()
684 struct iommu_domain *domain = &smmu_domain->domain; in arm_smmu_init_domain_context()
685 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_domain_context()
688 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
689 if (smmu_domain->smmu) in arm_smmu_init_domain_context()
708 * Note that you can't actually request stage-2 mappings. in arm_smmu_init_domain_context()
710 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) in arm_smmu_init_domain_context()
711 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_init_domain_context()
712 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_init_domain_context()
713 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_init_domain_context()
718 * the decision into the io-pgtable code where it arguably belongs, in arm_smmu_init_domain_context()
723 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L) in arm_smmu_init_domain_context()
724 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L; in arm_smmu_init_domain_context()
727 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) && in arm_smmu_init_domain_context()
728 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)) in arm_smmu_init_domain_context()
729 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S; in arm_smmu_init_domain_context()
730 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) && in arm_smmu_init_domain_context()
731 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K | in arm_smmu_init_domain_context()
734 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64; in arm_smmu_init_domain_context()
736 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) { in arm_smmu_init_domain_context()
737 ret = -EINVAL; in arm_smmu_init_domain_context()
741 switch (smmu_domain->stage) { in arm_smmu_init_domain_context()
743 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; in arm_smmu_init_domain_context()
744 start = smmu->num_s2_context_banks; in arm_smmu_init_domain_context()
745 ias = smmu->va_size; in arm_smmu_init_domain_context()
746 oas = smmu->ipa_size; in arm_smmu_init_domain_context()
747 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_init_domain_context()
749 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) { in arm_smmu_init_domain_context()
758 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops; in arm_smmu_init_domain_context()
766 cfg->cbar = CBAR_TYPE_S2_TRANS; in arm_smmu_init_domain_context()
768 ias = smmu->ipa_size; in arm_smmu_init_domain_context()
769 oas = smmu->pa_size; in arm_smmu_init_domain_context()
770 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_init_domain_context()
777 if (smmu->version == ARM_SMMU_V2) in arm_smmu_init_domain_context()
778 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2; in arm_smmu_init_domain_context()
780 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1; in arm_smmu_init_domain_context()
783 ret = -EINVAL; in arm_smmu_init_domain_context()
792 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
794 cfg->cbndx = ret; in arm_smmu_init_domain_context()
795 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_init_domain_context()
796 cfg->irptndx = atomic_inc_return(&smmu->irptndx); in arm_smmu_init_domain_context()
797 cfg->irptndx %= smmu->num_context_irqs; in arm_smmu_init_domain_context()
799 cfg->irptndx = cfg->cbndx; in arm_smmu_init_domain_context()
802 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) in arm_smmu_init_domain_context()
803 cfg->vmid = cfg->cbndx + 1; in arm_smmu_init_domain_context()
805 cfg->asid = cfg->cbndx; in arm_smmu_init_domain_context()
808 .pgsize_bitmap = smmu->pgsize_bitmap, in arm_smmu_init_domain_context()
811 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK, in arm_smmu_init_domain_context()
812 .tlb = smmu_domain->flush_ops, in arm_smmu_init_domain_context()
813 .iommu_dev = smmu->dev, in arm_smmu_init_domain_context()
816 if (smmu->impl && smmu->impl->init_context) { in arm_smmu_init_domain_context()
817 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev); in arm_smmu_init_domain_context()
822 if (smmu_domain->pgtbl_quirks) in arm_smmu_init_domain_context()
823 pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks; in arm_smmu_init_domain_context()
827 ret = -ENOMEM; in arm_smmu_init_domain_context()
832 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; in arm_smmu_init_domain_context()
835 domain->geometry.aperture_start = ~0UL << ias; in arm_smmu_init_domain_context()
836 domain->geometry.aperture_end = ~0UL; in arm_smmu_init_domain_context()
838 domain->geometry.aperture_end = (1UL << ias) - 1; in arm_smmu_init_domain_context()
841 domain->geometry.force_aperture = true; in arm_smmu_init_domain_context()
845 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_init_domain_context()
849 * handler seeing a half-initialised domain state. in arm_smmu_init_domain_context()
851 irq = smmu->irqs[cfg->irptndx]; in arm_smmu_init_domain_context()
853 if (smmu->impl && smmu->impl->context_fault) in arm_smmu_init_domain_context()
854 context_fault = smmu->impl->context_fault; in arm_smmu_init_domain_context()
858 if (smmu->impl && smmu->impl->context_fault_needs_threaded_irq) in arm_smmu_init_domain_context()
859 ret = devm_request_threaded_irq(smmu->dev, irq, NULL, in arm_smmu_init_domain_context()
862 "arm-smmu-context-fault", in arm_smmu_init_domain_context()
865 ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED, in arm_smmu_init_domain_context()
866 "arm-smmu-context-fault", smmu_domain); in arm_smmu_init_domain_context()
869 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", in arm_smmu_init_domain_context()
870 cfg->irptndx, irq); in arm_smmu_init_domain_context()
871 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX; in arm_smmu_init_domain_context()
874 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
877 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_init_domain_context()
881 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_init_domain_context()
882 smmu_domain->smmu = NULL; in arm_smmu_init_domain_context()
884 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
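
The domain geometry set up earlier in this function splits on the TTBR1 quirk: the upper-half aperture starts at ~0UL << ias, while the default lower-half aperture ends at (1UL << ias) - 1. A tiny sketch of that arithmetic for an illustrative ias of 48 bits (assumes a 64-bit unsigned long):

#include <stdio.h>

int main(void)
{
	unsigned int ias = 48;	/* illustrative input address size */

	/* IO_PGTABLE_QUIRK_ARM_TTBR1 case: upper half of the address space */
	printf("TTBR1 aperture start: 0x%016lx\n", ~0UL << ias);
	/* default TTBR0 case: [0, 2^ias - 1] */
	printf("TTBR0 aperture end:   0x%016lx\n", (1UL << ias) - 1);
	return 0;
}
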
890 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_destroy_domain_context()
891 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_destroy_domain_context()
905 smmu->cbs[cfg->cbndx].cfg = NULL; in arm_smmu_destroy_domain_context()
906 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_destroy_domain_context()
908 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) { in arm_smmu_destroy_domain_context()
909 irq = smmu->irqs[cfg->irptndx]; in arm_smmu_destroy_domain_context()
910 devm_free_irq(smmu->dev, irq, smmu_domain); in arm_smmu_destroy_domain_context()
913 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_destroy_domain_context()
914 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_destroy_domain_context()
923 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_domain_alloc_paging()
934 mutex_init(&smmu_domain->init_mutex); in arm_smmu_domain_alloc_paging()
935 spin_lock_init(&smmu_domain->cb_lock); in arm_smmu_domain_alloc_paging()
936 smmu_domain->domain.pgsize_bitmap = smmu->pgsize_bitmap; in arm_smmu_domain_alloc_paging()
938 return &smmu_domain->domain; in arm_smmu_domain_alloc_paging()
955 struct arm_smmu_smr *smr = smmu->smrs + idx; in arm_smmu_write_smr()
956 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) | in arm_smmu_write_smr()
957 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask); in arm_smmu_write_smr()
959 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) in arm_smmu_write_smr()
966 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; in arm_smmu_write_s2cr()
969 if (smmu->impl && smmu->impl->write_s2cr) { in arm_smmu_write_s2cr()
970 smmu->impl->write_s2cr(smmu, idx); in arm_smmu_write_s2cr()
974 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) | in arm_smmu_write_s2cr()
975 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) | in arm_smmu_write_s2cr()
976 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg); in arm_smmu_write_s2cr()
978 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && in arm_smmu_write_s2cr()
979 smmu->smrs[idx].valid) in arm_smmu_write_s2cr()
987 if (smmu->smrs) in arm_smmu_write_sme()
992 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
1000 if (!smmu->smrs) in arm_smmu_test_smr_masks()
1008 * these SMRs for the ID/mask values we're already trusting to be OK. in arm_smmu_test_smr_masks()
1010 for (i = 0; i < smmu->num_mapping_groups; i++) in arm_smmu_test_smr_masks()
1011 if (!smmu->smrs[i].valid) in arm_smmu_test_smr_masks()
1016 * SMR.ID bits may not be preserved if the corresponding MASK in arm_smmu_test_smr_masks()
1020 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask); in arm_smmu_test_smr_masks()
1023 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr); in arm_smmu_test_smr_masks()
1025 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask); in arm_smmu_test_smr_masks()
1028 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr); in arm_smmu_test_smr_masks()
1031 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) in arm_smmu_find_sme() argument
1033 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_find_sme()
1034 int i, free_idx = -ENOSPC; in arm_smmu_find_sme()
1036 /* Stream indexing is blissfully easy */ in arm_smmu_find_sme()
1041 for (i = 0; i < smmu->num_mapping_groups; ++i) { in arm_smmu_find_sme()
1058 if ((mask & smrs[i].mask) == mask && in arm_smmu_find_sme()
1059 !((id ^ smrs[i].id) & ~smrs[i].mask)) in arm_smmu_find_sme()
1063 * though, then there always exists at least one stream ID in arm_smmu_find_sme()
1066 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask))) in arm_smmu_find_sme()
1067 return -EINVAL; in arm_smmu_find_sme()
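
A self-contained sketch of the ID/mask arithmetic in arm_smmu_find_sme() above (not part of the driver; values and helper names are illustrative). An existing SMR covers a new id/mask pair when its mask is a superset and the IDs agree on every bit it does not ignore; the two entries conflict when at least one stream ID would match both:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct smr { uint16_t id, mask; };

/* Existing SMR already matches every stream ID the new (id, mask) pair would. */
static bool smr_covers(struct smr old, uint16_t id, uint16_t mask)
{
	return (mask & old.mask) == mask && !((id ^ old.id) & ~old.mask);
}

/* At least one stream ID is matched by both entries, i.e. they would conflict. */
static bool smr_overlaps(struct smr old, uint16_t id, uint16_t mask)
{
	return !((id ^ old.id) & ~(old.mask | mask));
}

int main(void)
{
	struct smr old = { .id = 0x400, .mask = 0xff };	/* matches 0x400..0x4ff */

	printf("covers 0x440/0x3f:    %d\n", smr_covers(old, 0x440, 0x3f));	/* 1 */
	printf("overlaps 0x4c0/0x100: %d\n", smr_overlaps(old, 0x4c0, 0x100));	/* 1 */
	printf("overlaps 0x500/0x0:   %d\n", smr_overlaps(old, 0x500, 0x0));	/* 0 */
	return 0;
}
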
1075 if (--smmu->s2crs[idx].count) in arm_smmu_free_sme()
1078 smmu->s2crs[idx] = s2cr_init_val; in arm_smmu_free_sme()
1079 if (smmu->smrs) in arm_smmu_free_sme()
1080 smmu->smrs[idx].valid = false; in arm_smmu_free_sme()
1089 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_alloc_smes()
1090 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_master_alloc_smes()
1093 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1094 /* Figure out a viable stream map entry allocation */ in arm_smmu_master_alloc_smes()
1096 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); in arm_smmu_master_alloc_smes()
1097 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); in arm_smmu_master_alloc_smes() local
1100 ret = -EEXIST; in arm_smmu_master_alloc_smes()
1104 ret = arm_smmu_find_sme(smmu, sid, mask); in arm_smmu_master_alloc_smes()
1109 if (smrs && smmu->s2crs[idx].count == 0) { in arm_smmu_master_alloc_smes()
1111 smrs[idx].mask = mask; in arm_smmu_master_alloc_smes()
1114 smmu->s2crs[idx].count++; in arm_smmu_master_alloc_smes()
1115 cfg->smendx[i] = (s16)idx; in arm_smmu_master_alloc_smes()
1122 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1126 while (i--) { in arm_smmu_master_alloc_smes()
1127 arm_smmu_free_sme(smmu, cfg->smendx[i]); in arm_smmu_master_alloc_smes()
1128 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_master_alloc_smes()
1130 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
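
The error path above unwinds with while (i--), releasing only the entries that were successfully claimed before the failure. A small sketch of that claim-then-roll-back pattern (stand-in helpers, not the driver's):

#include <stdio.h>

#define NR_IDS   4
#define INVALID -1

/* Stand-ins for the real claim/release primitives. */
static int claim_entry(int i)      { return (i == 2) ? -1 : 10 + i; }	/* fail on the 3rd ID */
static void release_entry(int idx) { printf("released entry %d\n", idx); }

int main(void)
{
	int smendx[NR_IDS] = { INVALID, INVALID, INVALID, INVALID };
	int i, ret = 0;

	for (i = 0; i < NR_IDS; i++) {
		int idx = claim_entry(i);

		if (idx < 0) {
			ret = idx;
			goto out_err;			/* unwind what we already hold */
		}
		smendx[i] = idx;
	}
	printf("all %d entries claimed\n", NR_IDS);
	return 0;

out_err:
	printf("claim %d failed (%d), rolling back\n", i, ret);
	while (i--) {					/* releases indices i-1 .. 0 */
		release_entry(smendx[i]);
		smendx[i] = INVALID;
	}
	return 1;
}
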
1137 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_free_smes()
1140 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
1144 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_master_free_smes()
1146 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
1153 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_install_s2crs()
1154 struct arm_smmu_s2cr *s2cr = smmu->s2crs; in arm_smmu_master_install_s2crs()
1178 * domains between of_xlate() and probe_device() - we have no way to cope in arm_smmu_attach_dev()
1185 return -ENODEV; in arm_smmu_attach_dev()
1187 smmu = cfg->smmu; in arm_smmu_attach_dev()
1202 if (smmu_domain->smmu != smmu) { in arm_smmu_attach_dev()
1203 ret = -EINVAL; in arm_smmu_attach_dev()
1209 smmu_domain->cfg.cbndx, fwspec); in arm_smmu_attach_dev()
1224 return -ENODEV; in arm_smmu_attach_dev_type()
1225 smmu = cfg->smmu; in arm_smmu_attach_dev_type()
1270 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_map_pages()
1271 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; in arm_smmu_map_pages()
1275 return -ENODEV; in arm_smmu_map_pages()
1278 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped); in arm_smmu_map_pages()
1288 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_unmap_pages()
1289 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; in arm_smmu_unmap_pages()
1296 ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather); in arm_smmu_unmap_pages()
1305 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_flush_iotlb_all()
1307 if (smmu_domain->flush_ops) { in arm_smmu_flush_iotlb_all()
1309 smmu_domain->flush_ops->tlb_flush_all(smmu_domain); in arm_smmu_flush_iotlb_all()
1318 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iotlb_sync()
1324 if (smmu->version == ARM_SMMU_V2 || in arm_smmu_iotlb_sync()
1325 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iotlb_sync()
1336 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iova_to_phys_hard()
1337 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_iova_to_phys_hard()
1338 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys_hard()
1339 struct device *dev = smmu->dev; in arm_smmu_iova_to_phys_hard()
1344 int ret, idx = cfg->cbndx; in arm_smmu_iova_to_phys_hard()
1351 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1353 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_iova_to_phys_hard()
1361 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1366 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys_hard()
1370 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1388 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys()
1393 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && in arm_smmu_iova_to_phys()
1394 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iova_to_phys()
1397 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys()
1412 return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK || in arm_smmu_capable()
1442 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master() in arm_smmu_probe_device()

1450 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); in arm_smmu_probe_device()
1453 ret = -EINVAL; in arm_smmu_probe_device()
1454 for (i = 0; i < fwspec->num_ids; i++) { in arm_smmu_probe_device()
1455 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); in arm_smmu_probe_device()
1456 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); in arm_smmu_probe_device() local
1458 if (sid & ~smmu->streamid_mask) { in arm_smmu_probe_device()
1459 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", in arm_smmu_probe_device()
1460 sid, smmu->streamid_mask); in arm_smmu_probe_device()
1463 if (mask & ~smmu->smr_mask_mask) { in arm_smmu_probe_device()
1464 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n", in arm_smmu_probe_device()
1465 mask, smmu->smr_mask_mask); in arm_smmu_probe_device()
1470 ret = -ENOMEM; in arm_smmu_probe_device()
1476 cfg->smmu = smmu; in arm_smmu_probe_device()
1478 while (i--) in arm_smmu_probe_device()
1479 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_probe_device()
1491 device_link_add(dev, smmu->dev, in arm_smmu_probe_device()
1494 return &smmu->iommu; in arm_smmu_probe_device()
1508 ret = arm_smmu_rpm_get(cfg->smmu); in arm_smmu_release_device()
1514 arm_smmu_rpm_put(cfg->smmu); in arm_smmu_release_device()
1525 smmu = cfg->smmu; in arm_smmu_probe_finalize()
1527 if (smmu->impl && smmu->impl->probe_finalize) in arm_smmu_probe_finalize()
1528 smmu->impl->probe_finalize(smmu, dev); in arm_smmu_probe_finalize()
1535 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_device_group()
1539 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1541 if (group && smmu->s2crs[idx].group && in arm_smmu_device_group()
1542 group != smmu->s2crs[idx].group) { in arm_smmu_device_group()
1543 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1544 return ERR_PTR(-EINVAL); in arm_smmu_device_group()
1547 group = smmu->s2crs[idx].group; in arm_smmu_device_group()
1551 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1565 smmu->s2crs[idx].group = group; in arm_smmu_device_group()
1567 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1577 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_set_pgtable_quirks()
1578 if (smmu_domain->smmu) in arm_smmu_set_pgtable_quirks()
1579 ret = -EPERM; in arm_smmu_set_pgtable_quirks()
1581 smmu_domain->pgtbl_quirks = quirks; in arm_smmu_set_pgtable_quirks()
1582 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_set_pgtable_quirks()
1590 u32 mask, fwid = 0; in arm_smmu_of_xlate() local
1592 if (args->args_count > 0) in arm_smmu_of_xlate()
1593 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]); in arm_smmu_of_xlate()
1595 if (args->args_count > 1) in arm_smmu_of_xlate()
1596 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]); in arm_smmu_of_xlate()
1597 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) in arm_smmu_of_xlate()
1598 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask); in arm_smmu_of_xlate()
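
of_xlate() above folds the stream ID and the SMR mask (taken from the second iommus cell, or from the SMMU node's stream-match-mask property) into one 32-bit fwspec ID. A minimal sketch of that packing, assuming the SMR layout of ID in bits [15:0] and MASK in bits [31:16]; the example values are illustrative:

#include <stdint.h>
#include <stdio.h>

/* SMR field layout assumed here: ID in bits [15:0], MASK in bits [31:16]. */
#define SMR_ID_SHIFT	0
#define SMR_MASK_SHIFT	16

static uint32_t pack_fwid(uint16_t sid, uint16_t smr_mask)
{
	return ((uint32_t)smr_mask << SMR_MASK_SHIFT) | ((uint32_t)sid << SMR_ID_SHIFT);
}

int main(void)
{
	/* e.g. iommus = <&smmu 0x840>; with a stream-match-mask of 0x7c00 */
	uint32_t fwid = pack_fwid(0x0840, 0x7c00);

	printf("fwid = 0x%08x (sid = 0x%04x, mask = 0x%04x)\n",
	       fwid, fwid & 0xffff, fwid >> 16);
	return 0;
}
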
1614 list_add_tail(&region->list, head); in arm_smmu_get_resv_regions()
1622 const struct arm_smmu_impl *impl = cfg->smmu->impl; in arm_smmu_def_domain_type()
1627 if (impl && impl->def_domain_type) in arm_smmu_def_domain_type()
1628 return impl->def_domain_type(dev); in arm_smmu_def_domain_type()
1668 * Reset stream mapping groups: Initial values mark all SMRn as in arm_smmu_device_reset()
1671 for (i = 0; i < smmu->num_mapping_groups; ++i) in arm_smmu_device_reset()
1675 for (i = 0; i < smmu->num_context_banks; ++i) { in arm_smmu_device_reset()
1706 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_device_reset()
1709 if (smmu->features & ARM_SMMU_FEAT_EXIDS) in arm_smmu_device_reset()
1712 if (smmu->impl && smmu->impl->reset) in arm_smmu_device_reset()
1713 smmu->impl->reset(smmu); in arm_smmu_device_reset()
1743 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_cfg_probe()
1746 dev_notice(smmu->dev, "probing hardware configuration...\n"); in arm_smmu_device_cfg_probe()
1747 dev_notice(smmu->dev, "SMMUv%d with:\n", in arm_smmu_device_cfg_probe()
1748 smmu->version == ARM_SMMU_V2 ? 2 : 1); in arm_smmu_device_cfg_probe()
1760 smmu->features |= ARM_SMMU_FEAT_TRANS_S1; in arm_smmu_device_cfg_probe()
1761 dev_notice(smmu->dev, "\tstage 1 translation\n"); in arm_smmu_device_cfg_probe()
1765 smmu->features |= ARM_SMMU_FEAT_TRANS_S2; in arm_smmu_device_cfg_probe()
1766 dev_notice(smmu->dev, "\tstage 2 translation\n"); in arm_smmu_device_cfg_probe()
1770 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; in arm_smmu_device_cfg_probe()
1771 dev_notice(smmu->dev, "\tnested translation\n"); in arm_smmu_device_cfg_probe()
1774 if (!(smmu->features & in arm_smmu_device_cfg_probe()
1776 dev_err(smmu->dev, "\tno translation support!\n"); in arm_smmu_device_cfg_probe()
1777 return -ENODEV; in arm_smmu_device_cfg_probe()
1781 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) { in arm_smmu_device_cfg_probe()
1782 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; in arm_smmu_device_cfg_probe()
1783 dev_notice(smmu->dev, "\taddress translation ops\n"); in arm_smmu_device_cfg_probe()
1794 dev_notice(smmu->dev, "\t%scoherent table walk\n", in arm_smmu_device_cfg_probe()
1795 cttw_fw ? "" : "non-"); in arm_smmu_device_cfg_probe()
1797 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1800 /* Max. number of entries we have for stream matching/indexing */ in arm_smmu_device_cfg_probe()
1801 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) { in arm_smmu_device_cfg_probe()
1802 smmu->features |= ARM_SMMU_FEAT_EXIDS; in arm_smmu_device_cfg_probe()
1807 smmu->streamid_mask = size - 1; in arm_smmu_device_cfg_probe()
1809 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; in arm_smmu_device_cfg_probe()
1812 dev_err(smmu->dev, in arm_smmu_device_cfg_probe()
1813 "stream-matching supported, but no SMRs present!\n"); in arm_smmu_device_cfg_probe()
1814 return -ENODEV; in arm_smmu_device_cfg_probe()
1817 /* Zero-initialised to mark as invalid */ in arm_smmu_device_cfg_probe()
1818 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), in arm_smmu_device_cfg_probe()
1820 if (!smmu->smrs) in arm_smmu_device_cfg_probe()
1821 return -ENOMEM; in arm_smmu_device_cfg_probe()
1823 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1826 /* s2cr->type == 0 means translation, so initialise explicitly */ in arm_smmu_device_cfg_probe()
1827 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), in arm_smmu_device_cfg_probe()
1829 if (!smmu->s2crs) in arm_smmu_device_cfg_probe()
1830 return -ENOMEM; in arm_smmu_device_cfg_probe()
1832 smmu->s2crs[i] = s2cr_init_val; in arm_smmu_device_cfg_probe()
1834 smmu->num_mapping_groups = size; in arm_smmu_device_cfg_probe()
1835 mutex_init(&smmu->stream_map_mutex); in arm_smmu_device_cfg_probe()
1836 spin_lock_init(&smmu->global_sync_lock); in arm_smmu_device_cfg_probe()
1838 if (smmu->version < ARM_SMMU_V2 || in arm_smmu_device_cfg_probe()
1840 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; in arm_smmu_device_cfg_probe()
1842 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S; in arm_smmu_device_cfg_probe()
1847 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12; in arm_smmu_device_cfg_probe()
1851 if (smmu->numpage != 2 * size << smmu->pgshift) in arm_smmu_device_cfg_probe()
1852 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1854 2 * size << smmu->pgshift, smmu->numpage); in arm_smmu_device_cfg_probe()
1856 smmu->numpage = size; in arm_smmu_device_cfg_probe()
1858 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id); in arm_smmu_device_cfg_probe()
1859 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id); in arm_smmu_device_cfg_probe()
1860 if (smmu->num_s2_context_banks > smmu->num_context_banks) { in arm_smmu_device_cfg_probe()
1861 dev_err(smmu->dev, "impossible number of S2 context banks!\n"); in arm_smmu_device_cfg_probe()
1862 return -ENODEV; in arm_smmu_device_cfg_probe()
1864 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", in arm_smmu_device_cfg_probe()
1865 smmu->num_context_banks, smmu->num_s2_context_banks); in arm_smmu_device_cfg_probe()
1866 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks, in arm_smmu_device_cfg_probe()
1867 sizeof(*smmu->cbs), GFP_KERNEL); in arm_smmu_device_cfg_probe()
1868 if (!smmu->cbs) in arm_smmu_device_cfg_probe()
1869 return -ENOMEM; in arm_smmu_device_cfg_probe()
1874 smmu->ipa_size = size; in arm_smmu_device_cfg_probe()
1876 /* The output mask is also applied for bypass */ in arm_smmu_device_cfg_probe()
1878 smmu->pa_size = size; in arm_smmu_device_cfg_probe()
1881 smmu->features |= ARM_SMMU_FEAT_VMID16; in arm_smmu_device_cfg_probe()
1888 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) in arm_smmu_device_cfg_probe()
1889 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1890 "failed to set DMA mask for table walker\n"); in arm_smmu_device_cfg_probe()
1892 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_device_cfg_probe()
1893 smmu->va_size = smmu->ipa_size; in arm_smmu_device_cfg_probe()
1894 if (smmu->version == ARM_SMMU_V1_64K) in arm_smmu_device_cfg_probe()
1895 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1898 smmu->va_size = arm_smmu_id_size_to_bits(size); in arm_smmu_device_cfg_probe()
1900 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K; in arm_smmu_device_cfg_probe()
1902 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K; in arm_smmu_device_cfg_probe()
1904 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1907 if (smmu->impl && smmu->impl->cfg_probe) { in arm_smmu_device_cfg_probe()
1908 ret = smmu->impl->cfg_probe(smmu); in arm_smmu_device_cfg_probe()
1914 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) in arm_smmu_device_cfg_probe()
1915 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M; in arm_smmu_device_cfg_probe()
1916 if (smmu->features & in arm_smmu_device_cfg_probe()
1918 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; in arm_smmu_device_cfg_probe()
1919 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K) in arm_smmu_device_cfg_probe()
1920 smmu->pgsize_bitmap |= SZ_16K | SZ_32M; in arm_smmu_device_cfg_probe()
1921 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K) in arm_smmu_device_cfg_probe()
1922 smmu->pgsize_bitmap |= SZ_64K | SZ_512M; in arm_smmu_device_cfg_probe()
1924 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", in arm_smmu_device_cfg_probe()
1925 smmu->pgsize_bitmap); in arm_smmu_device_cfg_probe()
1928 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) in arm_smmu_device_cfg_probe()
1929 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", in arm_smmu_device_cfg_probe()
1930 smmu->va_size, smmu->ipa_size); in arm_smmu_device_cfg_probe()
1932 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) in arm_smmu_device_cfg_probe()
1933 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", in arm_smmu_device_cfg_probe()
1934 smmu->ipa_size, smmu->pa_size); in arm_smmu_device_cfg_probe()
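
The supported page sizes reported at the end of cfg_probe() are just an OR of power-of-two sizes. A short sketch (illustrative feature set, not probed from hardware) that builds and decodes such a bitmap:

#include <stdio.h>

#define SZ_4K  0x00001000UL
#define SZ_2M  0x00200000UL
#define SZ_1G  0x40000000UL

int main(void)
{
	/* e.g. an SMMU supporting the AArch64 4K granule: 4K, 2M and 1G mappings */
	unsigned long pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G;

	printf("Supported page sizes: 0x%08lx\n", pgsize_bitmap);
	for (int bit = 0; bit < 8 * (int)sizeof(pgsize_bitmap); bit++)
		if (pgsize_bitmap & (1UL << bit))
			printf("  %lu KiB\n", (1UL << bit) / 1024);
	return 0;
}
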
1955 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1956 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1957 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1958 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1959 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1960 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1961 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1962 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1975 smmu->version = ARM_SMMU_V1; in acpi_smmu_get_data()
1976 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1979 smmu->version = ARM_SMMU_V1_64K; in acpi_smmu_get_data()
1980 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1983 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1984 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1987 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1988 smmu->model = ARM_MMU500; in acpi_smmu_get_data()
1991 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1992 smmu->model = CAVIUM_SMMUV2; in acpi_smmu_get_data()
1995 ret = -ENODEV; in acpi_smmu_get_data()
2004 struct device *dev = smmu->dev; in arm_smmu_device_acpi_probe()
2011 iort_smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_device_acpi_probe()
2013 ret = acpi_smmu_get_data(iort_smmu->model, smmu); in arm_smmu_device_acpi_probe()
2021 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) in arm_smmu_device_acpi_probe()
2022 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_acpi_probe()
2030 return -ENODEV; in arm_smmu_device_acpi_probe()
2038 struct device *dev = smmu->dev; in arm_smmu_device_dt_probe()
2041 if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs)) in arm_smmu_device_dt_probe()
2042 return dev_err_probe(dev, -ENODEV, in arm_smmu_device_dt_probe()
2043 "missing #global-interrupts property\n"); in arm_smmu_device_dt_probe()
2047 smmu->version = data->version; in arm_smmu_device_dt_probe()
2048 smmu->model = data->model; in arm_smmu_device_dt_probe()
2050 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); in arm_smmu_device_dt_probe()
2053 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n", in arm_smmu_device_dt_probe()
2061 return -ENODEV; in arm_smmu_device_dt_probe()
2064 if (of_dma_is_coherent(dev->of_node)) in arm_smmu_device_dt_probe()
2065 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_dt_probe()
2078 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_smr()
2095 for (i = 0; i < rmr->num_sids; i++) { in arm_smmu_rmr_install_bypass_smr()
2096 idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0); in arm_smmu_rmr_install_bypass_smr()
2100 if (smmu->s2crs[idx].count == 0) { in arm_smmu_rmr_install_bypass_smr()
2101 smmu->smrs[idx].id = rmr->sids[i]; in arm_smmu_rmr_install_bypass_smr()
2102 smmu->smrs[idx].mask = 0; in arm_smmu_rmr_install_bypass_smr()
2103 smmu->smrs[idx].valid = true; in arm_smmu_rmr_install_bypass_smr()
2105 smmu->s2crs[idx].count++; in arm_smmu_rmr_install_bypass_smr()
2106 smmu->s2crs[idx].type = S2CR_TYPE_BYPASS; in arm_smmu_rmr_install_bypass_smr()
2107 smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT; in arm_smmu_rmr_install_bypass_smr()
2113 dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt, in arm_smmu_rmr_install_bypass_smr()
2115 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_smr()
2122 struct device *dev = &pdev->dev; in arm_smmu_device_probe()
2130 return -ENOMEM; in arm_smmu_device_probe()
2132 smmu->dev = dev; in arm_smmu_device_probe()
2134 if (dev->of_node) in arm_smmu_device_probe()
2141 smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in arm_smmu_device_probe()
2142 if (IS_ERR(smmu->base)) in arm_smmu_device_probe()
2143 return PTR_ERR(smmu->base); in arm_smmu_device_probe()
2144 smmu->ioaddr = res->start; in arm_smmu_device_probe()
2147 * The resource size should effectively match the value of SMMU_TOP; in arm_smmu_device_probe()
2150 smmu->numpage = resource_size(res); in arm_smmu_device_probe()
2158 smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs; in arm_smmu_device_probe()
2159 if (smmu->num_context_irqs <= 0) in arm_smmu_device_probe()
2160 return dev_err_probe(dev, -ENODEV, in arm_smmu_device_probe()
2164 smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs, in arm_smmu_device_probe()
2165 sizeof(*smmu->irqs), GFP_KERNEL); in arm_smmu_device_probe()
2166 if (!smmu->irqs) in arm_smmu_device_probe()
2167 return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n", in arm_smmu_device_probe()
2168 smmu->num_context_irqs); in arm_smmu_device_probe()
2170 for (i = 0; i < smmu->num_context_irqs; i++) { in arm_smmu_device_probe()
2175 smmu->irqs[i] = irq; in arm_smmu_device_probe()
2178 err = devm_clk_bulk_get_all(dev, &smmu->clks); in arm_smmu_device_probe()
2183 smmu->num_clks = err; in arm_smmu_device_probe()
2185 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks); in arm_smmu_device_probe()
2193 if (smmu->version == ARM_SMMU_V2) { in arm_smmu_device_probe()
2194 if (smmu->num_context_banks > smmu->num_context_irqs) { in arm_smmu_device_probe()
2197 smmu->num_context_irqs, smmu->num_context_banks); in arm_smmu_device_probe()
2198 return -ENODEV; in arm_smmu_device_probe()
2202 smmu->num_context_irqs = smmu->num_context_banks; in arm_smmu_device_probe()
2205 if (smmu->impl && smmu->impl->global_fault) in arm_smmu_device_probe()
2206 global_fault = smmu->impl->global_fault; in arm_smmu_device_probe()
2217 "arm-smmu global fault", smmu); in arm_smmu_device_probe()
2232 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL, in arm_smmu_device_probe()
2233 "smmu.%pa", &smmu->ioaddr); in arm_smmu_device_probe()
2237 err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, in arm_smmu_device_probe()
2240 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_probe()
2245 * We want to avoid touching dev->power.lock in fastpaths unless in arm_smmu_device_probe()
2246 * it's really going to do something useful - pm_runtime_enabled() in arm_smmu_device_probe()
2250 if (dev->pm_domain) { in arm_smmu_device_probe()
2263 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) in arm_smmu_device_shutdown()
2264 dev_notice(&pdev->dev, "disabling translation\n"); in arm_smmu_device_shutdown()
2271 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_device_shutdown()
2272 pm_runtime_force_suspend(smmu->dev); in arm_smmu_device_shutdown()
2274 clk_bulk_disable(smmu->num_clks, smmu->clks); in arm_smmu_device_shutdown()
2276 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_device_shutdown()
2283 iommu_device_unregister(&smmu->iommu); in arm_smmu_device_remove()
2284 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_remove()
2294 ret = clk_bulk_enable(smmu->num_clks, smmu->clks); in arm_smmu_runtime_resume()
2307 clk_bulk_disable(smmu->num_clks, smmu->clks); in arm_smmu_runtime_suspend()
2317 ret = clk_bulk_prepare(smmu->num_clks, smmu->clks); in arm_smmu_pm_resume()
2326 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_pm_resume()
2344 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_pm_suspend()
2356 .name = "arm-smmu",
2369 MODULE_ALIAS("platform:arm-smmu");