Lines matching full:smmu (Arm SMMUv3 driver, arm-smmu-v3.c)
33 #include "arm-smmu-v3.h"
111 static void parse_driver_options(struct arm_smmu_device *smmu)
116 if (of_property_read_bool(smmu->dev->of_node,
118 smmu->options |= arm_smmu_options[i].opt;
119 dev_notice(smmu->dev, "option %s\n",
214 static void queue_poll_init(struct arm_smmu_device *smmu,
219 qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
371 static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu,
376 if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq)
377 cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent);
379 return cmdq ?: &smmu->cmdq;
382 static bool arm_smmu_cmdq_needs_busy_polling(struct arm_smmu_device *smmu,
385 if (cmdq == &smmu->cmdq)
388 return smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV;
391 static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
403 if (smmu->options & ARM_SMMU_OPT_MSIPOLL) {
409 if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
413 void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
432 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
437 dev_err(smmu->dev, "retrying command fetch\n");
459 dev_err(smmu->dev, "skipping command in error state:\n");
461 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
465 if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
471 static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
473 __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq);
567 * a. If we have MSIs, the SMMU can write back into the CMD_SYNC
637 static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
656 queue_poll_init(smmu, &qp);
669 * Wait until the SMMU signals a CMD_SYNC completion MSI.
672 static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
680 queue_poll_init(smmu, &qp);
693 * Wait until the SMMU cons index passes llq->prod.
696 static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
704 queue_poll_init(smmu, &qp);
746 static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
750 if (smmu->options & ARM_SMMU_OPT_MSIPOLL &&
751 !arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
752 return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);
754 return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq);
790 int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
811 if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
812 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
837 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, cmdq, prod);
853 /* 4. If we are the owner, take control of the SMMU hardware */
887 ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq);
889 dev_err_ratelimited(smmu->dev,
910 static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
917 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
923 smmu, arm_smmu_get_cmdq(smmu, ent), cmd, 1, sync);
926 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
929 return __arm_smmu_cmdq_issue_cmd(smmu, ent, false);
932 static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
935 return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
938 static void arm_smmu_cmdq_batch_init(struct arm_smmu_device *smmu,
943 cmds->cmdq = arm_smmu_get_cmdq(smmu, ent);
946 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
952 (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC);
956 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
958 arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
962 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
964 arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
969 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
977 static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
980 return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
1009 arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
1019 void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
1022 .opcode = smmu->features & ARM_SMMU_FEAT_E2H ?
1027 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
1225 struct arm_smmu_device *smmu = master->smmu;
1234 arm_smmu_cmdq_batch_init(smmu, &cmds, &cmd);
1237 arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
1240 arm_smmu_cmdq_batch_submit(smmu, &cmds);
1279 struct arm_smmu_device *smmu = master->smmu;
1296 *l2ptr = dma_alloc_coherent(smmu->dev, sizeof(**l2ptr),
1435 struct arm_smmu_device *smmu = master->smmu;
1441 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
1447 cd_table->linear.table = dma_alloc_coherent(smmu->dev, l1size,
1464 cd_table->l2.l1tab = dma_alloc_coherent(smmu->dev, l1size,
1483 struct arm_smmu_device *smmu = master->smmu;
1491 dma_free_coherent(smmu->dev,
1498 dma_free_coherent(smmu->dev,
1503 dma_free_coherent(smmu->dev,
1540 arm_smmu_cmdq_issue_cmd_with_sync(writer->master->smmu, &cmd);
1552 struct arm_smmu_device *smmu = master->smmu;
1564 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
1571 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
1585 void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
1593 if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
1605 struct arm_smmu_device *smmu = master->smmu;
1620 ((smmu->features & ARM_SMMU_FEAT_STALLS &&
1627 if ((smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) &&
1632 if (smmu->features & ARM_SMMU_FEAT_E2H) {
1668 struct arm_smmu_device *smmu = master->smmu;
1681 if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
1710 * because the STE table has not been installed in the SMMU yet.
1723 static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1726 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1733 *l2table = dmam_alloc_coherent(smmu->dev, sizeof(**l2table),
1736 dev_err(smmu->dev,
1770 arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
1774 lockdep_assert_held(&smmu->streams_mutex);
1776 node = rb_find(&sid, &smmu->streams, arm_smmu_streams_cmp_key);
1783 static void arm_smmu_decode_event(struct arm_smmu_device *smmu, u64 *raw,
1809 mutex_lock(&smmu->streams_mutex);
1810 master = arm_smmu_find_master(smmu, event->sid);
1813 mutex_unlock(&smmu->streams_mutex);
1816 static int arm_smmu_handle_event(struct arm_smmu_device *smmu, u64 *evt,
1865 mutex_lock(&smmu->streams_mutex);
1866 master = arm_smmu_find_master(smmu, event->sid);
1879 mutex_unlock(&smmu->streams_mutex);
1883 static void arm_smmu_dump_raw_event(struct arm_smmu_device *smmu, u64 *raw,
1888 dev_err(smmu->dev, "event 0x%02x received:\n", event->id);
1891 dev_err(smmu->dev, "\t0x%016llx\n", raw[i]);
1898 static void arm_smmu_dump_event(struct arm_smmu_device *smmu, u64 *raw,
1905 arm_smmu_dump_raw_event(smmu, raw, evt);
1912 dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x iova: %#llx ipa: %#llx",
1916 dev_err(smmu->dev, "%s %s %s %s \"%s\"%s%s stag: %#x",
1929 dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x fetch_addr: %#llx",
1936 dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x",
1946 struct arm_smmu_device *smmu = dev;
1947 struct arm_smmu_queue *q = &smmu->evtq.q;
1954 arm_smmu_decode_event(smmu, evt, &event);
1955 if (arm_smmu_handle_event(smmu, evt, &event))
1956 arm_smmu_dump_event(smmu, evt, &event, &rs);
1967 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
1975 static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
1987 dev_info(smmu->dev, "unexpected PRI request received:\n");
1988 dev_info(smmu->dev,
2009 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2015 struct arm_smmu_device *smmu = dev;
2016 struct arm_smmu_queue *q = &smmu->priq.q;
2022 arm_smmu_handle_ppr(smmu, evt);
2025 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
2033 static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
2038 struct arm_smmu_device *smmu = dev;
2040 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
2041 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
2047 dev_warn(smmu->dev,
2052 dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
2053 arm_smmu_device_disable(smmu);
2057 dev_warn(smmu->dev, "GERROR MSI write aborted\n");
2060 dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
2063 dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
2066 dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
2069 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
2072 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
2075 arm_smmu_cmdq_skip_err(smmu);
2077 writel(gerror, smmu->base + ARM_SMMU_GERRORN);
2083 struct arm_smmu_device *smmu = dev;
2086 if (smmu->features & ARM_SMMU_FEAT_PRI)
2174 arm_smmu_cmdq_batch_init(master->smmu, &cmds, &cmd);
2177 arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
2180 return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
2194 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
2214 arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd);
2237 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
2242 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
2249 struct arm_smmu_device *smmu = smmu_domain->smmu;
2255 * to the SMMU. We are relying on the dma_wmb() implicit during cmd
2260 arm_smmu_tlb_inv_asid(smmu, smmu_domain->cd.asid);
2264 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
2274 struct arm_smmu_device *smmu = smmu_domain->smmu;
2282 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
2305 arm_smmu_cmdq_batch_init(smmu, &cmds, cmd);
2308 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
2334 arm_smmu_cmdq_batch_add(smmu, &cmds, cmd);
2337 arm_smmu_cmdq_batch_submit(smmu, &cmds);
2351 cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
2366 arm_smmu_cmdq_issue_cmd_with_sync(smmu_domain->smmu, &cmd);
2381 .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
2414 static bool arm_smmu_dbm_capable(struct arm_smmu_device *smmu)
2418 return (smmu->features & features) == features;
2429 return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
2436 return arm_smmu_dbm_capable(master->smmu);
2479 struct arm_smmu_device *smmu = smmu_domain->smmu;
2492 ida_free(&smmu->vmid_map, cfg->vmid);
2498 static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
2508 XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
2514 static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
2521 vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
2531 struct arm_smmu_device *smmu, u32 flags)
2537 int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
2542 .pgsize_bitmap = smmu->pgsize_bitmap,
2543 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
2545 .iommu_dev = smmu->dev,
2550 unsigned long ias = (smmu->features &
2554 pgtbl_cfg.oas = smmu->ias;
2564 pgtbl_cfg.ias = smmu->ias;
2565 pgtbl_cfg.oas = smmu->oas;
2568 if ((smmu->features & ARM_SMMU_FEAT_S2FWB) &&
2586 ret = finalise_stage_fn(smmu, smmu_domain);
2593 smmu_domain->smmu = smmu;
2598 arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
2600 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2602 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
2616 struct arm_smmu_device *smmu = master->smmu;
2628 arm_smmu_get_step_for_sid(smmu, sid);
2644 struct arm_smmu_device *smmu = master->smmu;
2647 if (!(smmu->features & ARM_SMMU_FEAT_ATS))
2660 struct arm_smmu_device *smmu = master->smmu;
2663 stu = __ffs(smmu->pgsize_bitmap);
2701 master->smmu->ssid_bits);
2789 ret = iopf_queue_add_device(master->smmu->evtq.iopf, master->dev);
2810 iopf_queue_remove_device(master->smmu->evtq.iopf, master->dev);
2881 * The SMMU does not support enabling ATS with bypass/abort.
2992 * SMMU is translating for the new domain and both the old&new
3010 struct arm_smmu_device *smmu;
3023 smmu = master->smmu;
3025 if (smmu_domain->smmu != smmu)
3080 struct arm_smmu_device *smmu = master->smmu;
3083 if (smmu_domain->smmu != smmu)
3138 if (smmu_domain->smmu != master->smmu)
3257 arm_smmu_make_bypass_ste(master->smmu, &ste);
3299 struct arm_smmu_device *smmu = master->smmu;
3318 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
3324 if (!(smmu->features & ARM_SMMU_FEAT_NESTING)) {
3334 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) {
3347 ret = arm_smmu_domain_finalise(smmu_domain, smmu, flags);
3386 if (smmu_domain->smmu)
3425 static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
3427 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
3428 return arm_smmu_strtab_l1_idx(sid) < smmu->strtab_cfg.l2.num_l1_ents;
3429 return sid < smmu->strtab_cfg.linear.num_ents;
3432 static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
3434 /* Check the SIDs are in range of the SMMU and our stream table */
3435 if (!arm_smmu_sid_in_range(smmu, sid))
3439 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
3440 return arm_smmu_init_l2_strtab(smmu, sid);
3445 static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
3458 mutex_lock(&smmu->streams_mutex);
3467 ret = arm_smmu_init_sid_strtab(smmu, sid);
3472 existing = rb_find_add(&new_stream->node, &smmu->streams,
3493 rb_erase(&master->streams[i].node, &smmu->streams);
3496 mutex_unlock(&smmu->streams_mutex);
3504 struct arm_smmu_device *smmu = master->smmu;
3507 if (!smmu || !master->streams)
3510 mutex_lock(&smmu->streams_mutex);
3512 rb_erase(&master->streams[i].node, &smmu->streams);
3513 mutex_unlock(&smmu->streams_mutex);
3521 struct arm_smmu_device *smmu;
3528 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
3529 if (!smmu)
3537 master->smmu = smmu;
3540 ret = arm_smmu_insert_master(smmu, master);
3545 master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits);
3557 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
3561 if ((smmu->features & ARM_SMMU_FEAT_STALLS &&
3563 smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
3567 unsigned int stu = __ffs(smmu->pgsize_bitmap);
3572 return &smmu->iommu;
3715 int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
3724 q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
3733 dev_err(smmu->dev,
3740 dev_info(smmu->dev, "allocated %u entries for %s\n",
3756 int arm_smmu_cmdq_init(struct arm_smmu_device *smmu,
3764 cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
3772 static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
3777 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base,
3783 ret = arm_smmu_cmdq_init(smmu, &smmu->cmdq);
3788 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1,
3794 if ((smmu->features & ARM_SMMU_FEAT_SVA) &&
3795 (smmu->features & ARM_SMMU_FEAT_STALLS)) {
3796 smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev));
3797 if (!smmu->evtq.iopf)
3802 if (!(smmu->features & ARM_SMMU_FEAT_PRI))
3805 return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1,
3810 static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
3813 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
3815 arm_smmu_strtab_l1_idx((1ULL << smmu->sid_bits) - 1);
3820 dev_warn(smmu->dev,
3823 smmu->sid_bits);
3826 cfg->l2.l1tab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->l2.l1_dma,
3829 dev_err(smmu->dev,
3835 cfg->l2.l2ptrs = devm_kcalloc(smmu->dev, cfg->l2.num_l1_ents,
3843 static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
3846 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
3848 size = (1 << smmu->sid_bits) * sizeof(struct arm_smmu_ste);
3849 cfg->linear.table = dmam_alloc_coherent(smmu->dev, size,
3853 dev_err(smmu->dev,
3858 cfg->linear.num_ents = 1 << smmu->sid_bits;
3864 static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
3868 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
3869 ret = arm_smmu_init_strtab_2lvl(smmu);
3871 ret = arm_smmu_init_strtab_linear(smmu);
3875 ida_init(&smmu->vmid_map);
3880 static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
3884 mutex_init(&smmu->streams_mutex);
3885 smmu->streams = RB_ROOT;
3887 ret = arm_smmu_init_queues(smmu);
3891 ret = arm_smmu_init_strtab(smmu);
3895 if (smmu->impl_ops && smmu->impl_ops->init_structures)
3896 return smmu->impl_ops->init_structures(smmu);
3901 static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
3906 writel_relaxed(val, smmu->base + reg_off);
3907 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
3912 static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
3915 u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
3929 dev_err(smmu->dev, "GBPA not responding to update\n");
3944 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
3950 writeq_relaxed(doorbell, smmu->base + cfg[0]);
3951 writel_relaxed(msg->data, smmu->base + cfg[1]);
3952 writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
3955 static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
3958 struct device *dev = smmu->dev;
3961 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
3962 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
3964 if (smmu->features & ARM_SMMU_FEAT_PRI)
3965 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
3969 if (!(smmu->features & ARM_SMMU_FEAT_MSI))
3973 dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
3984 smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX);
3985 smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX);
3986 smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
3992 static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
3996 arm_smmu_setup_msis(smmu);
3999 irq = smmu->evtq.q.irq;
4001 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
4004 "arm-smmu-v3-evtq", smmu);
4006 dev_warn(smmu->dev, "failed to enable evtq irq\n");
4008 dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
4011 irq = smmu->gerr_irq;
4013 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
4014 0, "arm-smmu-v3-gerror", smmu);
4016 dev_warn(smmu->dev, "failed to enable gerror irq\n");
4018 dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
4021 if (smmu->features & ARM_SMMU_FEAT_PRI) {
4022 irq = smmu->priq.q.irq;
4024 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
4027 "arm-smmu-v3-priq",
4028 smmu);
4030 dev_warn(smmu->dev,
4033 dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
4038 static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
4044 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
4047 dev_err(smmu->dev, "failed to disable irqs\n");
4051 irq = smmu->combined_irq;
4057 ret = devm_request_threaded_irq(smmu->dev, irq,
4061 "arm-smmu-v3-combined-irq", smmu);
4063 dev_warn(smmu->dev, "failed to enable combined irq\n");
4065 arm_smmu_setup_unique_irqs(smmu);
4067 if (smmu->features & ARM_SMMU_FEAT_PRI)
4070 /* Enable interrupt generation on the SMMU */
4071 ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
4074 dev_warn(smmu->dev, "failed to enable irqs\n");
4079 static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
4083 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
4085 dev_err(smmu->dev, "failed to clear cr0\n");
4090 static void arm_smmu_write_strtab(struct arm_smmu_device *smmu)
4092 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
4096 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
4106 FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
4110 smmu->base + ARM_SMMU_STRTAB_BASE);
4111 writel_relaxed(reg, smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
4114 static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
4120 /* Clear CR0 and sync (disables SMMU and queue processing) */
4121 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
4123 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
4124 arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
4127 ret = arm_smmu_device_disable(smmu);
4138 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
4143 if (smmu->features & ARM_SMMU_FEAT_E2H)
4146 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
4149 arm_smmu_write_strtab(smmu);
4152 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
4153 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
4154 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
4157 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
4160 dev_err(smmu->dev, "failed to enable command queue\n");
4166 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
4169 if (smmu->features & ARM_SMMU_FEAT_HYP) {
4171 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
4175 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
4178 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
4179 writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
4180 writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);
4183 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
4186 dev_err(smmu->dev, "failed to enable event queue\n");
4191 if (smmu->features & ARM_SMMU_FEAT_PRI) {
4192 writeq_relaxed(smmu->priq.q.q_base,
4193 smmu->base + ARM_SMMU_PRIQ_BASE);
4194 writel_relaxed(smmu->priq.q.llq.prod,
4195 smmu->page1 + ARM_SMMU_PRIQ_PROD);
4196 writel_relaxed(smmu->priq.q.llq.cons,
4197 smmu->page1 + ARM_SMMU_PRIQ_CONS);
4200 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
4203 dev_err(smmu->dev, "failed to enable PRI queue\n");
4208 if (smmu->features & ARM_SMMU_FEAT_ATS) {
4210 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
4213 dev_err(smmu->dev, "failed to enable ATS check\n");
4218 ret = arm_smmu_setup_irqs(smmu);
4220 dev_err(smmu->dev, "failed to setup irqs\n");
4227 /* Enable the SMMU interface */
4229 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
4232 dev_err(smmu->dev, "failed to enable SMMU interface\n");
4236 if (smmu->impl_ops && smmu->impl_ops->device_reset) {
4237 ret = smmu->impl_ops->device_reset(smmu);
4239 dev_err(smmu->dev, "failed to reset impl\n");
4251 static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu)
4256 reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR);
4268 smmu->features &= ~ARM_SMMU_FEAT_SEV;
4271 smmu->features &= ~ARM_SMMU_FEAT_NESTING;
4275 smmu->features &= ~ARM_SMMU_FEAT_BTM;
4276 smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC;
4278 smmu->features &= ~ARM_SMMU_FEAT_NESTING;
4285 static void arm_smmu_get_httu(struct arm_smmu_device *smmu, u32 reg)
4287 u32 fw_features = smmu->features & (ARM_SMMU_FEAT_HA | ARM_SMMU_FEAT_HD);
4298 if (smmu->dev->of_node)
4299 smmu->features |= hw_features;
4302 dev_warn(smmu->dev,
4307 static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
4310 bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
4313 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
4317 smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
4320 smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
4329 smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
4333 smmu->features |= ARM_SMMU_FEAT_TT_BE;
4337 smmu->features |= ARM_SMMU_FEAT_TT_LE;
4341 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
4347 smmu->features |= ARM_SMMU_FEAT_PRI;
4350 smmu->features |= ARM_SMMU_FEAT_ATS;
4353 smmu->features |= ARM_SMMU_FEAT_SEV;
4356 smmu->features |= ARM_SMMU_FEAT_MSI;
4358 smmu->options |= ARM_SMMU_OPT_MSIPOLL;
4362 smmu->features |= ARM_SMMU_FEAT_HYP;
4364 smmu->features |= ARM_SMMU_FEAT_E2H;
4367 arm_smmu_get_httu(smmu, reg);
4374 dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
4379 smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
4382 smmu->features |= ARM_SMMU_FEAT_STALLS;
4386 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
4389 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
4392 dev_err(smmu->dev, "no translation support!\n");
4399 smmu->ias = 40;
4404 dev_err(smmu->dev, "AArch64 table format not supported!\n");
4409 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
4410 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
4413 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
4415 dev_err(smmu->dev, "embedded implementation not supported\n");
4420 smmu->features |= ARM_SMMU_FEAT_ATTR_TYPES_OVR;
4423 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
4425 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
4432 dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
4437 smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
4439 smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
4443 smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
4444 smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);
4445 smmu->iommu.max_pasids = 1UL << smmu->ssid_bits;
4448 * If the SMMU supports fewer bits than would fill a single L2 stream
4451 if (smmu->sid_bits <= STRTAB_SPLIT)
4452 smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
4455 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
4457 smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
4459 smmu->features |= ARM_SMMU_FEAT_S2FWB;
4462 smmu->features |= ARM_SMMU_FEAT_BBML2;
4465 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
4468 smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);
4472 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
4474 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
4476 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
4480 smmu->features |= ARM_SMMU_FEAT_VAX;
4485 smmu->oas = 32;
4488 smmu->oas = 36;
4491 smmu->oas = 40;
4494 smmu->oas = 42;
4497 smmu->oas = 44;
4500 smmu->oas = 52;
4501 smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
4504 dev_info(smmu->dev,
4508 smmu->oas = 48;
4512 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
4513 dev_warn(smmu->dev,
4516 smmu->ias = max(smmu->ias, smmu->oas);
4518 if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) &&
4519 (smmu->features & ARM_SMMU_FEAT_TRANS_S2))
4520 smmu->features |= ARM_SMMU_FEAT_NESTING;
4522 arm_smmu_device_iidr_probe(smmu);
4524 if (arm_smmu_sva_supported(smmu))
4525 smmu->features |= ARM_SMMU_FEAT_SVA;
4527 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
4528 smmu->ias, smmu->oas, smmu->features);
4535 struct arm_smmu_device *smmu)
4540 /* Look for an NVDA200C node whose _UID matches the SMMU node ID */
4544 smmu->impl_dev = &adev->dev;
4545 smmu->options |= ARM_SMMU_OPT_TEGRA241_CMDQV;
4546 dev_info(smmu->dev, "found companion CMDQV device: %s\n",
4547 dev_name(smmu->impl_dev));
4553 struct arm_smmu_device *smmu)
4559 struct arm_smmu_device *smmu)
4566 smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
4569 smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
4573 * Tegra241 implementation stores its SMMU options and impl_dev
4576 acpi_smmu_dsdt_probe_tegra241_cmdqv(node, smmu);
4580 dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
4585 struct arm_smmu_device *smmu)
4588 struct device *dev = smmu->dev;
4597 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
4601 smmu->features |= ARM_SMMU_FEAT_HD;
4604 smmu->features |= ARM_SMMU_FEAT_HA;
4607 return acpi_smmu_iort_probe_model(node, smmu);
4611 struct arm_smmu_device *smmu)
4618 struct arm_smmu_device *smmu)
4631 parse_driver_options(smmu);
4634 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
4639 static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
4641 if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
4655 static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
4661 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
4669 ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]);
4671 dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
4680 arm_smmu_make_bypass_ste(smmu,
4681 arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
4685 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
4690 struct arm_smmu_device *smmu = data;
4692 if (smmu->impl_ops && smmu->impl_ops->device_remove)
4693 smmu->impl_ops->device_remove(smmu);
4701 static struct arm_smmu_device *arm_smmu_impl_probe(struct arm_smmu_device *smmu)
4707 if (smmu->impl_dev && (smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV))
4708 new_smmu = tegra241_cmdqv_probe(smmu);
4711 return smmu;
4740 struct arm_smmu_device *smmu;
4743 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4744 if (!smmu)
4746 smmu->dev = dev;
4749 ret = arm_smmu_device_dt_probe(pdev, smmu);
4751 ret = arm_smmu_device_acpi_probe(pdev, smmu);
4756 smmu = arm_smmu_impl_probe(smmu);
4757 if (IS_ERR(smmu))
4758 return PTR_ERR(smmu);
4764 if (resource_size(res) < arm_smmu_resource_size(smmu)) {
4774 smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
4775 if (IS_ERR(smmu->base))
4776 return PTR_ERR(smmu->base);
4778 if (arm_smmu_resource_size(smmu) > SZ_64K) {
4779 smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
4781 if (IS_ERR(smmu->page1))
4782 return PTR_ERR(smmu->page1);
4784 smmu->page1 = smmu->base;
4791 smmu->combined_irq = irq;
4795 smmu->evtq.q.irq = irq;
4799 smmu->priq.q.irq = irq;
4803 smmu->gerr_irq = irq;
4806 ret = arm_smmu_device_hw_probe(smmu);
4811 ret = arm_smmu_init_structures(smmu);
4816 platform_set_drvdata(pdev, smmu);
4819 arm_smmu_rmr_install_bypass_ste(smmu);
4822 ret = arm_smmu_device_reset(smmu);
4827 ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
4832 ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
4841 iommu_device_sysfs_remove(&smmu->iommu);
4843 arm_smmu_device_disable(smmu);
4845 iopf_queue_free(smmu->evtq.iopf);
4851 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
4853 iommu_device_unregister(&smmu->iommu);
4854 iommu_device_sysfs_remove(&smmu->iommu);
4855 arm_smmu_device_disable(smmu);
4856 iopf_queue_free(smmu->evtq.iopf);
4857 ida_destroy(&smmu->vmid_map);
4862 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
4864 arm_smmu_device_disable(smmu);
4868 { .compatible = "arm,smmu-v3", },
4881 .name = "arm-smmu-v3",
4894 MODULE_ALIAS("platform:arm-smmu-v3");