Lines matching "soc" in drivers/iommu/tegra-smmu.c

// SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
#include <linux/dma-mapping.h>
#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
#include "iommu-pages.h"
const struct tegra_smmu_group_soc *soc; in struct tegra_smmu_group
const struct tegra_smmu_soc *soc; in struct tegra_smmu
writel(value, smmu->regs + offset); in smmu_writel()
return readl(smmu->regs + offset); in smmu_readl()
((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_PAGE_MASK (~(SMMU_SIZE_PT-1))
return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1); in iova_pd_index()
return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1); in iova_pt_index()
return (addr & smmu->pfn_mask) == addr; in smmu_dma_addr_valid()
return (dma_addr_t)(pde & smmu->pfn_mask) << 12; in smmu_pde_to_dma()
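/*
 * Worked example (editor's illustration, not driver code) of the IOVA
 * decomposition above, using the driver's SMMU_PDE_SHIFT = 22 and
 * SMMU_PTE_SHIFT = 12 with 1024-entry tables:
 *
 *   iova     = 0x12345678
 *   pd index = iova >> 22           = 0x048  (one of 1024 PDEs)
 *   pt index = (iova >> 12) & 0x3ff = 0x345  (one of 1024 PTEs)
 *   offset   = iova & 0xfff         = 0x678  (byte within the 4 KiB page)
 *
 * smmu_pde_to_dma() runs the PDE encoding backwards: a PDE stores a
 * page frame number, so masking with pfn_mask and shifting left by 12
 * recovers the DMA address of the page table it points to.
 */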
offset &= ~(smmu->mc->soc->atom_size - 1); in smmu_flush_ptc()
if (smmu->mc->soc->num_address_bits > 32) { in smmu_flush_ptc()
if (smmu->soc->num_asids == 4) in smmu_flush_tlb_asid()
if (smmu->soc->num_asids == 4) in smmu_flush_tlb_section()
if (smmu->soc->num_asids == 4) in smmu_flush_tlb_group()
id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids); in tegra_smmu_alloc_asid()
if (id >= smmu->soc->num_asids) in tegra_smmu_alloc_asid()
return -ENOSPC; in tegra_smmu_alloc_asid()
set_bit(id, smmu->asids); in tegra_smmu_alloc_asid()
clear_bit(id, smmu->asids); in tegra_smmu_free_asid()
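/*
 * Editor's sketch of the ASID allocator pattern above (hypothetical
 * helper name; the real code operates on smmu->asids under smmu->lock):
 * claim the first clear bit in a fixed-size bitmap or fail.
 */
static int bitmap_id_alloc(unsigned long *bitmap, unsigned int nbits,
			   unsigned int *id)
{
	unsigned int bit = find_first_zero_bit(bitmap, nbits);

	if (bit >= nbits)
		return -ENOSPC;	/* all ASIDs in use */

	set_bit(bit, bitmap);	/* released later via clear_bit() */
	*id = bit;
	return 0;
}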
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; in tegra_smmu_domain_alloc_paging()
as->pd = iommu_alloc_pages_sz(GFP_KERNEL | __GFP_DMA, SMMU_SIZE_PD); in tegra_smmu_domain_alloc_paging()
if (!as->pd) { in tegra_smmu_domain_alloc_paging()
as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL); in tegra_smmu_domain_alloc_paging()
if (!as->count) { in tegra_smmu_domain_alloc_paging()
iommu_free_pages(as->pd); in tegra_smmu_domain_alloc_paging()
as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL); in tegra_smmu_domain_alloc_paging()
if (!as->pts) { in tegra_smmu_domain_alloc_paging()
kfree(as->count); in tegra_smmu_domain_alloc_paging()
iommu_free_pages(as->pd); in tegra_smmu_domain_alloc_paging()
spin_lock_init(&as->lock); in tegra_smmu_domain_alloc_paging()
as->domain.pgsize_bitmap = SZ_4K; in tegra_smmu_domain_alloc_paging()
as->domain.geometry.aperture_start = 0; in tegra_smmu_domain_alloc_paging()
as->domain.geometry.aperture_end = 0xffffffff; in tegra_smmu_domain_alloc_paging()
as->domain.geometry.force_aperture = true; in tegra_smmu_domain_alloc_paging()
return &as->domain; in tegra_smmu_domain_alloc_paging()
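/*
 * Why the geometry above is exactly 0x0-0xffffffff (editor's note):
 * the SMMU maps only 4 KiB pages (pgsize_bitmap = SZ_4K), and the
 * two-level table holds 1024 PDEs x 1024 PTEs = 2^20 pages = 2^32
 * bytes, one full 32-bit IOVA space. force_aperture = true keeps
 * IOVA allocations inside that window.
 */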
WARN_ON_ONCE(as->use_count); in tegra_smmu_domain_free()
kfree(as->count); in tegra_smmu_domain_free()
kfree(as->pts); in tegra_smmu_domain_free()
for (i = 0; i < smmu->soc->num_swgroups; i++) { in tegra_smmu_find_swgroup()
if (smmu->soc->swgroups[i].swgroup == swgroup) { in tegra_smmu_find_swgroup()
group = &smmu->soc->swgroups[i]; in tegra_smmu_find_swgroup()
value = smmu_readl(smmu, group->reg); in tegra_smmu_enable()
smmu_writel(smmu, value, group->reg); in tegra_smmu_enable()
pr_warn("%s group from swgroup %u not found\n", __func__, in tegra_smmu_enable()
for (i = 0; i < smmu->soc->num_clients; i++) { in tegra_smmu_enable()
const struct tegra_mc_client *client = &smmu->soc->clients[i]; in tegra_smmu_enable()
if (client->swgroup != swgroup) in tegra_smmu_enable()
value = smmu_readl(smmu, client->regs.smmu.reg); in tegra_smmu_enable()
value |= BIT(client->regs.smmu.bit); in tegra_smmu_enable()
smmu_writel(smmu, value, client->regs.smmu.reg); in tegra_smmu_enable()
value = smmu_readl(smmu, group->reg); in tegra_smmu_disable()
smmu_writel(smmu, value, group->reg); in tegra_smmu_disable()
for (i = 0; i < smmu->soc->num_clients; i++) { in tegra_smmu_disable()
const struct tegra_mc_client *client = &smmu->soc->clients[i]; in tegra_smmu_disable()
if (client->swgroup != swgroup) in tegra_smmu_disable()
value = smmu_readl(smmu, client->regs.smmu.reg); in tegra_smmu_disable()
value &= ~BIT(client->regs.smmu.bit); in tegra_smmu_disable()
smmu_writel(smmu, value, client->regs.smmu.reg); in tegra_smmu_disable()
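/*
 * Editor's sketch of the read-modify-write pair used by enable and
 * disable above (hypothetical helper name): every memory client owns
 * one enable bit in a per-client register, taken from the SoC's
 * client table, and attach/detach simply set or clear that bit.
 */
static void client_set_enabled(struct tegra_smmu *smmu,
			       const struct tegra_mc_client *client,
			       bool enable)
{
	u32 value = smmu_readl(smmu, client->regs.smmu.reg);

	if (enable)
		value |= BIT(client->regs.smmu.bit);
	else
		value &= ~BIT(client->regs.smmu.bit);

	smmu_writel(smmu, value, client->regs.smmu.reg);
}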
mutex_lock(&smmu->lock); in tegra_smmu_as_prepare()
if (as->use_count > 0) { in tegra_smmu_as_prepare()
as->use_count++; in tegra_smmu_as_prepare()
as->pd_dma = in tegra_smmu_as_prepare()
dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_prepare()
if (dma_mapping_error(smmu->dev, as->pd_dma)) { in tegra_smmu_as_prepare()
err = -ENOMEM; in tegra_smmu_as_prepare()
/* We can't handle 64-bit DMA addresses */ in tegra_smmu_as_prepare()
if (!smmu_dma_addr_valid(smmu, as->pd_dma)) { in tegra_smmu_as_prepare()
err = -ENOMEM; in tegra_smmu_as_prepare()
err = tegra_smmu_alloc_asid(smmu, &as->id); in tegra_smmu_as_prepare()
smmu_flush_ptc(smmu, as->pd_dma, 0); in tegra_smmu_as_prepare()
smmu_flush_tlb_asid(smmu, as->id); in tegra_smmu_as_prepare()
smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID); in tegra_smmu_as_prepare()
value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr); in tegra_smmu_as_prepare()
as->smmu = smmu; in tegra_smmu_as_prepare()
as->use_count++; in tegra_smmu_as_prepare()
mutex_unlock(&smmu->lock); in tegra_smmu_as_prepare()
dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_prepare()
mutex_unlock(&smmu->lock); in tegra_smmu_as_prepare()
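/*
 * The prepare path above, in order (editor's summary): take a
 * reference if the AS is already live; otherwise map the page
 * directory for device access, reject DMA addresses the 32-bit
 * hardware cannot express, allocate an ASID, flush stale PTC/TLB
 * state, and program SMMU_PTB_ASID/SMMU_PTB_DATA so the hardware
 * walks this AS. Errors unwind in reverse: unmap, then unlock.
 */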
mutex_lock(&smmu->lock); in tegra_smmu_as_unprepare()
if (--as->use_count > 0) { in tegra_smmu_as_unprepare()
mutex_unlock(&smmu->lock); in tegra_smmu_as_unprepare()
tegra_smmu_free_asid(smmu, as->id); in tegra_smmu_as_unprepare()
dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_unprepare()
as->smmu = NULL; in tegra_smmu_as_unprepare()
mutex_unlock(&smmu->lock); in tegra_smmu_as_unprepare()
return -ENOENT; in tegra_smmu_attach_dev()
for (index = 0; index < fwspec->num_ids; index++) { in tegra_smmu_attach_dev()
tegra_smmu_enable(smmu, fwspec->ids[index], as->id); in tegra_smmu_attach_dev()
return -ENODEV; in tegra_smmu_attach_dev()
while (index--) { in tegra_smmu_attach_dev()
tegra_smmu_disable(smmu, fwspec->ids[index], as->id); in tegra_smmu_attach_dev()
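/*
 * Editor's note on the `while (index--)` above: attach enables one
 * SWGROUP per fwspec ID, so when preparing a later ID fails, the loop
 * walks back over exactly the IDs already enabled and disables each
 * one, leaving no device half-attached.
 */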
return -ENODEV; in tegra_smmu_identity_attach()
smmu = as->smmu; in tegra_smmu_identity_attach()
for (index = 0; index < fwspec->num_ids; index++) { in tegra_smmu_identity_attach()
tegra_smmu_disable(smmu, fwspec->ids[index], as->id); in tegra_smmu_identity_attach()
struct tegra_smmu *smmu = as->smmu; in tegra_smmu_set_pde()
u32 *pd = &as->pd->val[pd_index]; in tegra_smmu_set_pde()
dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset, in tegra_smmu_set_pde()
smmu_flush_ptc(smmu, as->pd_dma, offset); in tegra_smmu_set_pde()
smmu_flush_tlb_section(smmu, as->id, iova); in tegra_smmu_set_pde()
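/*
 * Update ordering implied by the PDE path above (editor's summary):
 * 1. the CPU stores the new PDE value;
 * 2. dma_sync_single_range_for_device() makes the store visible to
 *    the SMMU (the directory is mapped DMA_TO_DEVICE);
 * 3. the PTC flush evicts the page table cache line holding the old
 *    PDE;
 * 4. the TLB section flush drops translations derived from it.
 * Skipping any step can leave the hardware walking stale entries.
 */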
return &pt->val[iova_pt_index(iova)]; in tegra_smmu_pte_offset()
struct tegra_smmu *smmu = as->smmu; in tegra_smmu_pte_lookup()
pt = as->pts[pd_index]; in tegra_smmu_pte_lookup()
*dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]); in tegra_smmu_pte_lookup()
struct tegra_smmu *smmu = as->smmu; in as_get_pte()
if (!as->pts[pde]) { in as_get_pte()
dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT, in as_get_pte()
if (dma_mapping_error(smmu->dev, dma)) { in as_get_pte()
dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT, in as_get_pte()
as->pts[pde] = pt; in as_get_pte()
*dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]); in as_get_pte()
return tegra_smmu_pte_offset(as->pts[pde], iova); in as_get_pte()
as->count[pd_index]++; in tegra_smmu_pte_get_use()
struct tegra_pt *pt = as->pts[pde]; in tegra_smmu_pte_put_use()
if (--as->count[pde] == 0) { in tegra_smmu_pte_put_use()
struct tegra_smmu *smmu = as->smmu; in tegra_smmu_pte_put_use()
dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]); in tegra_smmu_pte_put_use()
dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT, in tegra_smmu_pte_put_use()
as->pts[pde] = NULL; in tegra_smmu_pte_put_use()
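/*
 * Editor's note on the teardown above: as->count[pde] counts live
 * PTEs per page table. When tegra_smmu_pte_put_use() drops it to
 * zero, the PDE is cleared and the table is unmapped for DMA and
 * freed, so empty page tables never linger in the directory.
 */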
struct tegra_smmu *smmu = as->smmu; in tegra_smmu_set_pte()
dma_sync_single_range_for_device(smmu->dev, pte_dma, offset, in tegra_smmu_set_pte()
smmu_flush_tlb_group(smmu, as->id, iova); in tegra_smmu_set_pte()
struct tegra_pt *pt = as->pts[pde]; in as_get_pde_page()
* spinlock needs to be unlocked and re-locked after allocation. in as_get_pde_page()
spin_unlock_irqrestore(&as->lock, *flags); in as_get_pde_page()
spin_lock_irqsave(&as->lock, *flags); in as_get_pde_page()
if (as->pts[pde]) { in as_get_pde_page()
pt = as->pts[pde]; in as_get_pde_page()
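/*
 * Editor's sketch of the lock-drop pattern above (alloc_table() and
 * free_table() are hypothetical stand-ins): the allocation may sleep,
 * so the spinlock is released around it and the slot re-checked after
 * relocking, in case another CPU installed a table meanwhile.
 */
static struct tegra_pt *get_or_alloc_pt(struct tegra_smmu_as *as,
					unsigned int pde,
					unsigned long *flags)
{
	struct tegra_pt *pt;

	spin_unlock_irqrestore(&as->lock, *flags);
	pt = alloc_table();		/* may sleep */
	spin_lock_irqsave(&as->lock, *flags);

	if (as->pts[pde]) {		/* lost the race: use the winner's */
		free_table(pt);
		return as->pts[pde];
	}

	return pt;
}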
return -ENOMEM; in __tegra_smmu_map()
return -ENOMEM; in __tegra_smmu_map()
/* If we aren't overwriting a pre-existing entry, increment use */ in __tegra_smmu_map()
spin_lock_irqsave(&as->lock, flags); in tegra_smmu_map()
spin_unlock_irqrestore(&as->lock, flags); in tegra_smmu_map()
spin_lock_irqsave(&as->lock, flags); in tegra_smmu_unmap()
spin_unlock_irqrestore(&as->lock, flags); in tegra_smmu_unmap()
pfn = *pte & as->smmu->pfn_mask; in tegra_smmu_iova_to_phys()
put_device(&pdev->dev); in tegra_smmu_find()
return mc->smmu; in tegra_smmu_find()
const struct iommu_ops *ops = smmu->iommu.ops; in tegra_smmu_configure()
err = iommu_fwspec_init(dev, dev_fwnode(smmu->dev)); in tegra_smmu_configure()
err = ops->of_xlate(dev, args); in tegra_smmu_configure()
struct device_node *np = dev->of_node; in tegra_smmu_probe_device()
while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, in tegra_smmu_probe_device()
return ERR_PTR(-ENODEV); in tegra_smmu_probe_device()
return &smmu->iommu; in tegra_smmu_probe_device()
for (i = 0; i < smmu->soc->num_groups; i++) in tegra_smmu_find_group()
for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++) in tegra_smmu_find_group()
if (smmu->soc->groups[i].swgroups[j] == swgroup) in tegra_smmu_find_group()
return &smmu->soc->groups[i]; in tegra_smmu_find_group()
struct tegra_smmu *smmu = group->smmu; in tegra_smmu_group_release()
mutex_lock(&smmu->lock); in tegra_smmu_group_release()
list_del(&group->list); in tegra_smmu_group_release()
mutex_unlock(&smmu->lock); in tegra_smmu_group_release()
const struct tegra_smmu_group_soc *soc; in tegra_smmu_device_group()
unsigned int swgroup = fwspec->ids[0]; in tegra_smmu_device_group()
soc = tegra_smmu_find_group(smmu, swgroup); in tegra_smmu_device_group()
mutex_lock(&smmu->lock); in tegra_smmu_device_group()
list_for_each_entry(group, &smmu->groups, list) in tegra_smmu_device_group()
if ((group->swgroup == swgroup) || (soc && group->soc == soc)) { in tegra_smmu_device_group()
grp = iommu_group_ref_get(group->group); in tegra_smmu_device_group()
mutex_unlock(&smmu->lock); in tegra_smmu_device_group()
group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL); in tegra_smmu_device_group()
mutex_unlock(&smmu->lock); in tegra_smmu_device_group()
INIT_LIST_HEAD(&group->list); in tegra_smmu_device_group()
group->swgroup = swgroup; in tegra_smmu_device_group()
group->smmu = smmu; in tegra_smmu_device_group()
group->soc = soc; in tegra_smmu_device_group()
group->group = pci_device_group(dev); in tegra_smmu_device_group()
group->group = generic_device_group(dev); in tegra_smmu_device_group()
if (IS_ERR(group->group)) { in tegra_smmu_device_group()
devm_kfree(smmu->dev, group); in tegra_smmu_device_group()
mutex_unlock(&smmu->lock); in tegra_smmu_device_group()
iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release); in tegra_smmu_device_group()
if (soc) in tegra_smmu_device_group()
iommu_group_set_name(group->group, soc->name); in tegra_smmu_device_group()
list_add_tail(&group->list, &smmu->groups); in tegra_smmu_device_group()
mutex_unlock(&smmu->lock); in tegra_smmu_device_group()
return group->group; in tegra_smmu_device_group()
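/*
 * Editor's note on the flow above: under smmu->lock the function first
 * searches smmu->groups for an entry with the same swgroup (or the
 * same group_soc table entry) and returns it via iommu_group_ref_get();
 * only when nothing matches does it allocate a new group, name it
 * after the SoC table entry, and add it to the list. The release
 * callback unlinks it under the same lock.
 */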
struct platform_device *iommu_pdev = of_find_device_by_node(args->np); in tegra_smmu_of_xlate()
u32 id = args->args[0]; in tegra_smmu_of_xlate()
* Note: we are here releasing the reference of &iommu_pdev->dev, which in tegra_smmu_of_xlate()
* is mc->dev. Although some functions in tegra_smmu_ops may keep using in tegra_smmu_of_xlate()
* its private data beyond this point, it's still safe to do so because in tegra_smmu_of_xlate()
put_device(&iommu_pdev->dev); in tegra_smmu_of_xlate()
dev_iommu_priv_set(dev, mc->smmu); in tegra_smmu_of_xlate()
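/*
 * Editor's sketch of the .of_xlate shape above (exact prototype varies
 * by kernel version; lookup of the SMMU instance elided): the single
 * phandle cell is the SWGROUP ID, which is appended to the device's
 * fwspec, while the driver instance travels via dev_iommu_priv_set().
 */
static int example_of_xlate(struct device *dev,
			    const struct of_phandle_args *args)
{
	u32 id = args->args[0];

	return iommu_fwspec_add_ids(dev, &id, 1);
}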
{ .compatible = "nvidia,tegra30-ahb", }, in tegra_smmu_ahb_enable()
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
struct tegra_smmu *smmu = s->private; in tegra_smmu_swgroups_show()
seq_printf(s, "swgroup enabled ASID\n"); in tegra_smmu_swgroups_show()
seq_printf(s, "------------------------\n"); in tegra_smmu_swgroups_show()
for (i = 0; i < smmu->soc->num_swgroups; i++) { in tegra_smmu_swgroups_show()
const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i]; in tegra_smmu_swgroups_show()
value = smmu_readl(smmu, group->reg); in tegra_smmu_swgroups_show()
seq_printf(s, "%-9s %-7s %#04x\n", group->name, status, in tegra_smmu_swgroups_show()
static int tegra_smmu_clients_show(struct seq_file *s, void *data)
struct tegra_smmu *smmu = s->private; in tegra_smmu_clients_show()
seq_printf(s, "client enabled\n"); in tegra_smmu_clients_show()
seq_printf(s, "--------------------\n"); in tegra_smmu_clients_show()
for (i = 0; i < smmu->soc->num_clients; i++) { in tegra_smmu_clients_show()
const struct tegra_mc_client *client = &smmu->soc->clients[i]; in tegra_smmu_clients_show()
value = smmu_readl(smmu, client->regs.smmu.reg); in tegra_smmu_clients_show()
if (value & BIT(client->regs.smmu.bit)) in tegra_smmu_clients_show()
seq_printf(s, "%-12s %s\n", client->name, status); in tegra_smmu_clients_show()
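/*
 * Shape of the two debugfs tables, reconstructed from the format
 * strings above (names and values below are made up for illustration):
 *
 *   swgroup enabled ASID
 *   ------------------------
 *   <name>    yes     0x01
 *   <name>    no      0x00
 *
 *   client enabled
 *   --------------------
 *   <name>       yes
 *   <name>       no
 */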
smmu->debugfs = debugfs_create_dir("smmu", NULL); in tegra_smmu_debugfs_init()
debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu, in tegra_smmu_debugfs_init()
debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu, in tegra_smmu_debugfs_init()
debugfs_remove_recursive(smmu->debugfs); in tegra_smmu_debugfs_exit()
const struct tegra_smmu_soc *soc, in tegra_smmu_probe()
return ERR_PTR(-ENOMEM); in tegra_smmu_probe()
* callback via the IOMMU device's .drvdata field. in tegra_smmu_probe()
mc->smmu = smmu; in tegra_smmu_probe()
smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL); in tegra_smmu_probe()
if (!smmu->asids) in tegra_smmu_probe()
return ERR_PTR(-ENOMEM); in tegra_smmu_probe()
INIT_LIST_HEAD(&smmu->groups); in tegra_smmu_probe()
mutex_init(&smmu->lock); in tegra_smmu_probe()
smmu->regs = mc->regs; in tegra_smmu_probe()
smmu->soc = soc; in tegra_smmu_probe()
smmu->dev = dev; in tegra_smmu_probe()
smmu->mc = mc; in tegra_smmu_probe()
smmu->pfn_mask = in tegra_smmu_probe()
BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1; in tegra_smmu_probe()
mc->soc->num_address_bits, smmu->pfn_mask); in tegra_smmu_probe()
smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1; in tegra_smmu_probe()
dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines, in tegra_smmu_probe()
smmu->tlb_mask); in tegra_smmu_probe()
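/*
 * Worked example for the two masks above (illustrative numbers): with
 * num_address_bits = 32 and SMMU_PTE_SHIFT = 12,
 *   pfn_mask = BIT_MASK(32 - 12) - 1 = (1 << 20) - 1 = 0xfffff,
 * i.e. 2^20 addressable page frames covering 4 GiB. With
 * num_tlb_lines = 16, fls(16) = 5, so
 *   tlb_mask = (1 << 5) - 1 = 0x1f,
 * a power-of-two mask wide enough for the active-lines field.
 */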
if (soc->supports_request_limit) in tegra_smmu_probe()
if (soc->supports_round_robin_arbitration) in tegra_smmu_probe()
err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev)); in tegra_smmu_probe()
err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev); in tegra_smmu_probe()
iommu_device_sysfs_remove(&smmu->iommu); in tegra_smmu_probe()
iommu_device_unregister(&smmu->iommu); in tegra_smmu_remove()
iommu_device_sysfs_remove(&smmu->iommu); in tegra_smmu_remove()