Lines matching "cfg" in drivers/iommu/io-pgtable-arm.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * CPU-agnostic ARM page table allocator.
10 #define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
14 #include <linux/io-pgtable.h>
19 #include <linux/dma-mapping.h>
23 #include "io-pgtable-arm.h"
24 #include "iommu-pages.h"
42 (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) + \
46 (sizeof(arm_lpae_iopte) << (d)->bits_per_level)
48 (sizeof(arm_lpae_iopte) << (d)->pgd_bits)
58 ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
62 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
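The macros above (source lines 42-62) drive the whole walk: each level consumes bits_per_level IOVA bits, and the pgd level may consume extra pgd_bits. A standalone sketch (not kernel code) of the same arithmetic for a 4 KiB granule with 8-byte PTEs, ignoring the wider-pgd case that ARM_LPAE_PGD_IDX handles:

#include <stdint.h>
#include <stdio.h>

#define MAX_LEVELS	4
#define PG_SHIFT	12			/* 4K granule */
#define BITS_PER_LEVEL	(PG_SHIFT - 3)		/* 8-byte PTEs -> 9 */

/* mirrors ARM_LPAE_LVL_SHIFT(l,d) */
static unsigned int lvl_shift(int lvl)
{
	return (MAX_LEVELS - lvl) * BITS_PER_LEVEL + 3;
}

/* mirrors ARM_LPAE_LVL_IDX(a,l,d) for a non-pgd level */
static unsigned int lvl_idx(uint64_t iova, int lvl)
{
	return (iova >> lvl_shift(lvl)) & ((1 << BITS_PER_LEVEL) - 1);
}

int main(void)
{
	uint64_t iova = 0x0000004012345000ULL;

	for (int lvl = 0; lvl < MAX_LEVELS; lvl++)
		printf("level %d: shift %2u, index %u\n",
		       lvl, lvl_shift(lvl), lvl_idx(iova, lvl));
	return 0;
}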
95 /* Stage-1 PTE */
105 /* Stage-2 PTE */
168 if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE) in iopte_leaf()
176 if (lvl == (ARM_LPAE_MAX_LEVELS - 1)) in iopte_table()
187 return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK; in paddr_to_iopte()
198 /* Rotate the packed high-order bits back to the top */ in iopte_to_paddr()
199 return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4); in iopte_to_paddr()
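The paddr_to_iopte()/iopte_to_paddr() pair (lines 187-199) packs physical-address bits 51:48 into PTE bits 15:12, which is only legal with the 64 KiB granule, where those low bits of a block address are zero. A standalone round-trip check, assuming the kernel's GENMASK_ULL(47, 12) address mask:

#include <assert.h>
#include <stdint.h>

#define PTE_ADDR_MASK	0x0000fffffffff000ULL	/* PA bits [47:12] */

static uint64_t paddr_to_pte(uint64_t paddr)
{
	/* of the bits which overlap, either 51:48 or 15:12 are zero */
	return (paddr | (paddr >> (48 - 12))) & PTE_ADDR_MASK;
}

static uint64_t pte_to_paddr(uint64_t pte)
{
	uint64_t paddr = pte & PTE_ADDR_MASK;

	/* rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (PTE_ADDR_MASK << 4);
}

int main(void)
{
	/* a 52-bit, 64K-aligned physical address survives the round trip */
	uint64_t paddr = 0x000f0009abcd0000ULL;

	assert(pte_to_paddr(paddr_to_pte(paddr)) == paddr);
	return 0;
}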
210 struct io_pgtable_cfg *cfg, in __arm_lpae_alloc_pages()
213 struct device *dev = cfg->iommu_dev; in __arm_lpae_alloc_pages()
220 if (cfg->alloc) in __arm_lpae_alloc_pages()
221 pages = cfg->alloc(cookie, size, gfp); in __arm_lpae_alloc_pages()
228 if (!cfg->coherent_walk) { in __arm_lpae_alloc_pages()
248 if (cfg->free) in __arm_lpae_alloc_pages()
249 cfg->free(cookie, pages, size); in __arm_lpae_alloc_pages()
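__arm_lpae_alloc_pages() prefers the driver-supplied cfg->alloc hook and falls back to the allocator from iommu-pages.h; when the walker is not cache-coherent it also DMA-maps the table so later syncs can push PTE writes out of the CPU caches (the kernel further insists the DMA address equal the physical address, since the walker is programmed with physical pointers). A condensed, hedged sketch of the pattern, with error unwinding abbreviated and the iommu-pages helper names taken as assumptions, since they have changed across kernel releases:

static void *pgtable_alloc(size_t size, gfp_t gfp,
			   struct io_pgtable_cfg *cfg, void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	void *pages;
	dma_addr_t dma;

	if (cfg->alloc)		/* driver-supplied allocator wins */
		pages = cfg->alloc(cookie, size, gfp);
	else			/* assumed helper from iommu-pages.h */
		pages = iommu_alloc_pages_node(dev_to_node(dev), gfp,
					       get_order(size));
	if (!pages)
		return NULL;

	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
	}

	return pages;

out_free:
	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		iommu_free_pages(pages, get_order(size));	/* assumed */
	return NULL;
}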
257 struct io_pgtable_cfg *cfg, in __arm_lpae_free_pages()
260 if (!cfg->coherent_walk) in __arm_lpae_free_pages()
261 dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), in __arm_lpae_free_pages()
264 if (cfg->free) in __arm_lpae_free_pages()
265 cfg->free(cookie, pages, size); in __arm_lpae_free_pages()
271 struct io_pgtable_cfg *cfg) in __arm_lpae_sync_pte()
273 dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep), in __arm_lpae_sync_pte()
277 static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries) in __arm_lpae_clear_pte()
282 if (!cfg->coherent_walk && num_entries) in __arm_lpae_clear_pte()
283 __arm_lpae_sync_pte(ptep, num_entries, cfg); in __arm_lpae_clear_pte()
296 struct io_pgtable_cfg *cfg = &data->iop.cfg; in __arm_lpae_init_pte()
300 if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1) in __arm_lpae_init_pte()
308 if (!cfg->coherent_walk) in __arm_lpae_init_pte()
309 __arm_lpae_sync_pte(ptep, num_entries, cfg); in __arm_lpae_init_pte()
320 if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) { in arm_lpae_init_pte()
323 return -EEXIST; in arm_lpae_init_pte()
332 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); in arm_lpae_init_pte()
336 return -EINVAL; in arm_lpae_init_pte()
350 struct io_pgtable_cfg *cfg = &data->iop.cfg; in arm_lpae_install_table()
353 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) in arm_lpae_install_table()
365 if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC)) in arm_lpae_install_table()
369 __arm_lpae_sync_pte(ptep, 1, cfg); in arm_lpae_install_table()
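arm_lpae_install_table() publishes a new next-level table without locks: a dma_wmb() ensures the zeroed table is visible before its pointer, then a relaxed cmpxchg64 installs it; a mapper that loses the race frees its own table and walks into the winner's, with ARM_LPAE_PTE_SW_SYNC recording whether a non-coherent PTE was already synced. A minimal userspace model of the race using C11 atomics, release ordering standing in for the barrier:

#include <stdatomic.h>
#include <stdint.h>

/* Model: publish 'new_pte' only if the slot still holds 'curr';
 * return what was observed, as the kernel function does. */
static uint64_t install_table(_Atomic uint64_t *slot, uint64_t curr,
			      uint64_t new_pte)
{
	uint64_t old = curr;

	if (atomic_compare_exchange_strong_explicit(slot, &old, new_pte,
						    memory_order_release,
						    memory_order_relaxed))
		return curr;	/* won: our table is now live */

	return old;		/* lost: free ours, descend into 'old' */
}

int main(void)
{
	_Atomic uint64_t slot = 0;

	return install_table(&slot, 0, 0x123) == 0 ? 0 : 1;
}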
384 struct io_pgtable_cfg *cfg = &data->iop.cfg; in __arm_lpae_map()
393 max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start; in __arm_lpae_map()
403 if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1)) in __arm_lpae_map()
404 return -EINVAL; in __arm_lpae_map()
409 cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie); in __arm_lpae_map()
411 return -ENOMEM; in __arm_lpae_map()
415 __arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie); in __arm_lpae_map()
416 } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) { in __arm_lpae_map()
417 __arm_lpae_sync_pte(ptep, 1, cfg); in __arm_lpae_map()
420 if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) { in __arm_lpae_map()
425 return -EEXIST; in __arm_lpae_map()
438 if (data->iop.fmt == ARM_64_LPAE_S1 || in arm_lpae_prot_to_pte()
439 data->iop.fmt == ARM_32_LPAE_S1) { in arm_lpae_prot_to_pte()
443 else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD) in arm_lpae_prot_to_pte()
457 * having stage-1-like attributes but stage-2-like permissions. in arm_lpae_prot_to_pte()
459 if (data->iop.fmt == ARM_64_LPAE_S2 || in arm_lpae_prot_to_pte()
460 data->iop.fmt == ARM_32_LPAE_S2) { in arm_lpae_prot_to_pte()
482 if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE) in arm_lpae_prot_to_pte()
490 if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) in arm_lpae_prot_to_pte()
493 if (data->iop.fmt != ARM_MALI_LPAE) in arm_lpae_prot_to_pte()
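arm_lpae_prot_to_pte() encodes permissions differently per stage: stage 1 expresses write access negatively via the AP[2] read-only bit, stage 2 carries explicit read/write S2AP bits, and execute-never sits at bit 54 in both. A simplified standalone sketch of that mapping (MAIR attribute indices, dirty tracking, and the Mali quirks omitted; bit positions per the Arm VMSA):

#include <stdint.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_NOEXEC	(1 << 3)

#define PTE_AP_RDONLY	(1ULL << 7)	/* stage 1: AP[2] */
#define PTE_HAP_READ	(1ULL << 6)	/* stage 2: S2AP[0] */
#define PTE_HAP_WRITE	(1ULL << 7)	/* stage 2: S2AP[1] */
#define PTE_XN		(1ULL << 54)

uint64_t prot_to_pte(int prot, int stage)
{
	uint64_t pte = 0;

	if (stage == 1) {
		if (!(prot & IOMMU_WRITE))
			pte |= PTE_AP_RDONLY;
	} else {
		if (prot & IOMMU_READ)
			pte |= PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= PTE_HAP_WRITE;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= PTE_XN;

	return pte;
}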
504 struct io_pgtable_cfg *cfg = &data->iop.cfg; in arm_lpae_map_pages()
505 arm_lpae_iopte *ptep = data->pgd; in arm_lpae_map_pages()
506 int ret, lvl = data->start_level; in arm_lpae_map_pages()
508 long iaext = (s64)iova >> cfg->ias; in arm_lpae_map_pages()
510 if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize)) in arm_lpae_map_pages()
511 return -EINVAL; in arm_lpae_map_pages()
513 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) in arm_lpae_map_pages()
515 if (WARN_ON(iaext || paddr >> cfg->oas)) in arm_lpae_map_pages()
516 return -ERANGE; in arm_lpae_map_pages()
519 return -EINVAL; in arm_lpae_map_pages()
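The iaext computation above is a sign-extension range check: arithmetic-shifting the IOVA right by cfg->ias leaves only the bits above the input address space, which must be all zeros for a TTBR0-style table and all ones for a TTBR1 upper-half table (hence the ~ flip at line 514). The equivalent standalone predicate:

#include <stdbool.h>
#include <stdint.h>

bool iova_in_range(uint64_t iova, unsigned int ias, bool ttbr1)
{
	int64_t ext = (int64_t)iova >> ias;	/* sign-extending shift */

	return ttbr1 ? ext == -1 : ext == 0;
}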
539 if (lvl == data->start_level) in __arm_lpae_free_pgtable()
547 if (lvl == ARM_LPAE_MAX_LEVELS - 1) in __arm_lpae_free_pgtable()
555 if (!pte || iopte_leaf(pte, lvl, data->iop.fmt)) in __arm_lpae_free_pgtable()
561 __arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie); in __arm_lpae_free_pgtable()
568 __arm_lpae_free_pgtable(data, data->start_level, data->pgd); in arm_lpae_free_pgtable()
578 struct io_pgtable_cfg *cfg = &data->iop.cfg; in arm_lpae_split_blk_unmap()
584 int i, unmap_idx_start = -1, num_entries = 0, max_entries; in arm_lpae_split_blk_unmap()
589 tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie); in arm_lpae_split_blk_unmap()
595 max_entries = ptes_per_table - unmap_idx_start; in arm_lpae_split_blk_unmap()
612 __arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie); in arm_lpae_split_blk_unmap()
624 io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size); in arm_lpae_split_blk_unmap()
638 struct io_pgtable *iop = &data->iop; in __arm_lpae_unmap()
653 max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start; in __arm_lpae_unmap()
656 /* Find and handle non-leaf entries */ in __arm_lpae_unmap()
662 if (!iopte_leaf(pte, lvl, iop->fmt)) { in __arm_lpae_unmap()
663 __arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1); in __arm_lpae_unmap()
673 __arm_lpae_clear_pte(ptep, &iop->cfg, i); in __arm_lpae_unmap()
680 } else if (iopte_leaf(pte, lvl, iop->fmt)) { in __arm_lpae_unmap()
699 struct io_pgtable_cfg *cfg = &data->iop.cfg; in arm_lpae_unmap_pages()
700 arm_lpae_iopte *ptep = data->pgd; in arm_lpae_unmap_pages()
701 long iaext = (s64)iova >> cfg->ias; in arm_lpae_unmap_pages()
703 if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount)) in arm_lpae_unmap_pages()
706 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) in arm_lpae_unmap_pages()
712 data->start_level, ptep); in arm_lpae_unmap_pages()
719 arm_lpae_iopte pte, *ptep = data->pgd; in arm_lpae_iova_to_phys()
720 int lvl = data->start_level; in arm_lpae_iova_to_phys()
736 if (iopte_leaf(pte, lvl, data->iop.fmt)) in arm_lpae_iova_to_phys()
747 iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1); in arm_lpae_iova_to_phys()
767 struct io_pgtable *iop = &data->iop; in io_pgtable_visit_dirty()
770 if (iopte_leaf(pte, lvl, iop->fmt)) { in io_pgtable_visit_dirty()
774 iommu_dirty_bitmap_record(walk_data->dirty, in io_pgtable_visit_dirty()
775 walk_data->addr, size); in io_pgtable_visit_dirty()
776 if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR)) in io_pgtable_visit_dirty()
779 walk_data->addr += size; in io_pgtable_visit_dirty()
784 return -EINVAL; in io_pgtable_visit_dirty()
799 return -EINVAL; in __arm_lpae_iopte_walk_dirty()
801 if (lvl == data->start_level) in __arm_lpae_iopte_walk_dirty()
806 for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data); in __arm_lpae_iopte_walk_dirty()
807 (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) { in __arm_lpae_iopte_walk_dirty()
822 struct io_pgtable_cfg *cfg = &data->iop.cfg; in arm_lpae_read_and_clear_dirty()
829 arm_lpae_iopte *ptep = data->pgd; in arm_lpae_read_and_clear_dirty()
830 int lvl = data->start_level; in arm_lpae_read_and_clear_dirty()
833 return -EINVAL; in arm_lpae_read_and_clear_dirty()
834 if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1))) in arm_lpae_read_and_clear_dirty()
835 return -EINVAL; in arm_lpae_read_and_clear_dirty()
836 if (data->iop.fmt != ARM_64_LPAE_S1) in arm_lpae_read_and_clear_dirty()
837 return -EINVAL; in arm_lpae_read_and_clear_dirty()
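With IO_PGTABLE_QUIRK_ARM_HD, dirty tracking reuses the stage-1 permission bits: a PTE with DBM (bit 51) set is hardware-updatable, and the walker records a write by clearing the AP[2] read-only bit, so "clearing" the dirty state means setting AP[2] again. A standalone sketch of the per-leaf predicate the walk above applies (bit positions per the Arm VMSA):

#include <stdbool.h>
#include <stdint.h>

#define PTE_AP_RDONLY	(1ULL << 7)	/* AP[2]: clear = writable */
#define PTE_DBM		(1ULL << 51)	/* dirty-bit-modifier */

bool pte_writeable_dirty(uint64_t pte)
{
	/* DBM set and RDONLY already cleared by hardware => dirty */
	return (pte & (PTE_DBM | PTE_AP_RDONLY)) == PTE_DBM;
}

uint64_t pte_clear_dirty(uint64_t pte)
{
	return pte | PTE_AP_RDONLY;	/* back to writable-clean */
}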
842 static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) in arm_lpae_restrict_pgsizes()
854 if (cfg->pgsize_bitmap & PAGE_SIZE) in arm_lpae_restrict_pgsizes()
856 else if (cfg->pgsize_bitmap & ~PAGE_MASK) in arm_lpae_restrict_pgsizes()
857 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); in arm_lpae_restrict_pgsizes()
858 else if (cfg->pgsize_bitmap & PAGE_MASK) in arm_lpae_restrict_pgsizes()
859 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); in arm_lpae_restrict_pgsizes()
873 if (cfg->oas > 48) in arm_lpae_restrict_pgsizes()
880 cfg->pgsize_bitmap &= page_sizes; in arm_lpae_restrict_pgsizes()
881 cfg->ias = min(cfg->ias, max_addr_bits); in arm_lpae_restrict_pgsizes()
882 cfg->oas = min(cfg->oas, max_addr_bits); in arm_lpae_restrict_pgsizes()
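The bitmaps picked by arm_lpae_restrict_pgsizes() follow from the geometry: a leaf installed n levels above the last covers granule << (n * bits_per_level) bytes, giving {4K, 2M, 1G}, {16K, 32M, 64G} and {64K, 512M, 4TB}, with the largest sizes gated on address-space width as in the oas > 48 check above. Standalone arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int pg_shift[] = { 12, 14, 16 };	/* 4K, 16K, 64K */

	for (int i = 0; i < 3; i++) {
		unsigned int bpl = pg_shift[i] - 3;	/* 8-byte PTEs */

		for (int n = 0; n < 3; n++)
			printf("granule %2uK: leaf at level %d -> 2^%u bytes\n",
			       1U << (pg_shift[i] - 10), 3 - n,
			       pg_shift[i] + n * bpl);
	}
	return 0;
}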
886 arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) in arm_lpae_alloc_pgtable()
891 arm_lpae_restrict_pgsizes(cfg); in arm_lpae_alloc_pgtable()
893 if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K))) in arm_lpae_alloc_pgtable()
896 if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS) in arm_lpae_alloc_pgtable()
899 if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) in arm_lpae_alloc_pgtable()
906 pg_shift = __ffs(cfg->pgsize_bitmap); in arm_lpae_alloc_pgtable()
907 data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte)); in arm_lpae_alloc_pgtable()
909 va_bits = cfg->ias - pg_shift; in arm_lpae_alloc_pgtable()
910 levels = DIV_ROUND_UP(va_bits, data->bits_per_level); in arm_lpae_alloc_pgtable()
911 data->start_level = ARM_LPAE_MAX_LEVELS - levels; in arm_lpae_alloc_pgtable()
914 data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1)); in arm_lpae_alloc_pgtable()
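A worked example of the geometry computed at lines 906-914, reproduced standalone: the starting level and pgd width fall out of dividing the post-granule VA bits into bits_per_level chunks:

#include <stdio.h>

static void geometry(unsigned int ias, unsigned int pg_shift)
{
	unsigned int bpl = pg_shift - 3;			/* 8-byte PTEs */
	unsigned int va_bits = ias - pg_shift;
	unsigned int levels = (va_bits + bpl - 1) / bpl;	/* DIV_ROUND_UP */
	unsigned int start_level = 4 - levels;			/* MAX_LEVELS = 4 */
	unsigned int pgd_bits = va_bits - bpl * (levels - 1);

	printf("ias=%u, %uK granule: start_level=%u, pgd has %u entries\n",
	       ias, 1U << (pg_shift - 10), start_level, 1U << pgd_bits);
}

int main(void)
{
	geometry(48, 12);	/* 4 levels, 512-entry pgd */
	geometry(40, 12);	/* 4 levels, 2-entry pgd   */
	geometry(32, 12);	/* 3 levels, 4-entry pgd   */
	return 0;
}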
916 data->iop.ops = (struct io_pgtable_ops) { in arm_lpae_alloc_pgtable()
927 arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) in arm_64_lpae_alloc_pgtable_s1()
931 typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr; in arm_64_lpae_alloc_pgtable_s1()
934 if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | in arm_64_lpae_alloc_pgtable_s1()
940 data = arm_lpae_alloc_pgtable(cfg); in arm_64_lpae_alloc_pgtable_s1()
945 if (cfg->coherent_walk) { in arm_64_lpae_alloc_pgtable_s1()
946 tcr->sh = ARM_LPAE_TCR_SH_IS; in arm_64_lpae_alloc_pgtable_s1()
947 tcr->irgn = ARM_LPAE_TCR_RGN_WBWA; in arm_64_lpae_alloc_pgtable_s1()
948 tcr->orgn = ARM_LPAE_TCR_RGN_WBWA; in arm_64_lpae_alloc_pgtable_s1()
949 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA) in arm_64_lpae_alloc_pgtable_s1()
952 tcr->sh = ARM_LPAE_TCR_SH_OS; in arm_64_lpae_alloc_pgtable_s1()
953 tcr->irgn = ARM_LPAE_TCR_RGN_NC; in arm_64_lpae_alloc_pgtable_s1()
954 if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)) in arm_64_lpae_alloc_pgtable_s1()
955 tcr->orgn = ARM_LPAE_TCR_RGN_NC; in arm_64_lpae_alloc_pgtable_s1()
957 tcr->orgn = ARM_LPAE_TCR_RGN_WBWA; in arm_64_lpae_alloc_pgtable_s1()
960 tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1; in arm_64_lpae_alloc_pgtable_s1()
963 tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K; in arm_64_lpae_alloc_pgtable_s1()
966 tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K; in arm_64_lpae_alloc_pgtable_s1()
969 tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K; in arm_64_lpae_alloc_pgtable_s1()
973 switch (cfg->oas) { in arm_64_lpae_alloc_pgtable_s1()
975 tcr->ips = ARM_LPAE_TCR_PS_32_BIT; in arm_64_lpae_alloc_pgtable_s1()
978 tcr->ips = ARM_LPAE_TCR_PS_36_BIT; in arm_64_lpae_alloc_pgtable_s1()
981 tcr->ips = ARM_LPAE_TCR_PS_40_BIT; in arm_64_lpae_alloc_pgtable_s1()
984 tcr->ips = ARM_LPAE_TCR_PS_42_BIT; in arm_64_lpae_alloc_pgtable_s1()
987 tcr->ips = ARM_LPAE_TCR_PS_44_BIT; in arm_64_lpae_alloc_pgtable_s1()
990 tcr->ips = ARM_LPAE_TCR_PS_48_BIT; in arm_64_lpae_alloc_pgtable_s1()
993 tcr->ips = ARM_LPAE_TCR_PS_52_BIT; in arm_64_lpae_alloc_pgtable_s1()
999 tcr->tsz = 64ULL - cfg->ias; in arm_64_lpae_alloc_pgtable_s1()
1011 cfg->arm_lpae_s1_cfg.mair = reg; in arm_64_lpae_alloc_pgtable_s1()
1014 data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), in arm_64_lpae_alloc_pgtable_s1()
1015 GFP_KERNEL, cfg, cookie); in arm_64_lpae_alloc_pgtable_s1()
1016 if (!data->pgd) in arm_64_lpae_alloc_pgtable_s1()
1023 cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd); in arm_64_lpae_alloc_pgtable_s1()
1024 return &data->iop; in arm_64_lpae_alloc_pgtable_s1()
1032 arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) in arm_64_lpae_alloc_pgtable_s2()
1036 typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr; in arm_64_lpae_alloc_pgtable_s2()
1039 if (cfg->quirks) in arm_64_lpae_alloc_pgtable_s2()
1042 data = arm_lpae_alloc_pgtable(cfg); in arm_64_lpae_alloc_pgtable_s2()
1048 * the depth of the stage-2 walk. in arm_64_lpae_alloc_pgtable_s2()
1050 if (data->start_level == 0) { in arm_64_lpae_alloc_pgtable_s2()
1055 data->pgd_bits += data->bits_per_level; in arm_64_lpae_alloc_pgtable_s2()
1056 data->start_level++; in arm_64_lpae_alloc_pgtable_s2()
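This is the stage-2 table-concatenation trick: rather than starting a four-level walk at a nearly empty level 0, the architecture lets VTCR.SL0 start one level down with up to 16 physically contiguous tables serving as the pgd. Worked standalone example for ias = 40 with a 4 KiB granule:

#include <stdio.h>

int main(void)
{
	unsigned int bits_per_level = 9;	/* 4K granule, 8-byte PTEs */
	unsigned int pgd_bits = 40 - 12 - bits_per_level * 3;	/* = 1 */

	/* as at lines 1055-1056: widen the pgd, start one level lower */
	pgd_bits += bits_per_level;
	printf("start at level 1: %u-entry pgd, %u concatenated tables\n",
	       1U << pgd_bits, 1U << (pgd_bits - bits_per_level));
	return 0;
}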
1061 if (cfg->coherent_walk) { in arm_64_lpae_alloc_pgtable_s2()
1062 vtcr->sh = ARM_LPAE_TCR_SH_IS; in arm_64_lpae_alloc_pgtable_s2()
1063 vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA; in arm_64_lpae_alloc_pgtable_s2()
1064 vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA; in arm_64_lpae_alloc_pgtable_s2()
1066 vtcr->sh = ARM_LPAE_TCR_SH_OS; in arm_64_lpae_alloc_pgtable_s2()
1067 vtcr->irgn = ARM_LPAE_TCR_RGN_NC; in arm_64_lpae_alloc_pgtable_s2()
1068 vtcr->orgn = ARM_LPAE_TCR_RGN_NC; in arm_64_lpae_alloc_pgtable_s2()
1071 sl = data->start_level; in arm_64_lpae_alloc_pgtable_s2()
1075 vtcr->tg = ARM_LPAE_TCR_TG0_4K; in arm_64_lpae_alloc_pgtable_s2()
1079 vtcr->tg = ARM_LPAE_TCR_TG0_16K; in arm_64_lpae_alloc_pgtable_s2()
1082 vtcr->tg = ARM_LPAE_TCR_TG0_64K; in arm_64_lpae_alloc_pgtable_s2()
1086 switch (cfg->oas) { in arm_64_lpae_alloc_pgtable_s2()
1088 vtcr->ps = ARM_LPAE_TCR_PS_32_BIT; in arm_64_lpae_alloc_pgtable_s2()
1091 vtcr->ps = ARM_LPAE_TCR_PS_36_BIT; in arm_64_lpae_alloc_pgtable_s2()
1094 vtcr->ps = ARM_LPAE_TCR_PS_40_BIT; in arm_64_lpae_alloc_pgtable_s2()
1097 vtcr->ps = ARM_LPAE_TCR_PS_42_BIT; in arm_64_lpae_alloc_pgtable_s2()
1100 vtcr->ps = ARM_LPAE_TCR_PS_44_BIT; in arm_64_lpae_alloc_pgtable_s2()
1103 vtcr->ps = ARM_LPAE_TCR_PS_48_BIT; in arm_64_lpae_alloc_pgtable_s2()
1106 vtcr->ps = ARM_LPAE_TCR_PS_52_BIT; in arm_64_lpae_alloc_pgtable_s2()
1112 vtcr->tsz = 64ULL - cfg->ias; in arm_64_lpae_alloc_pgtable_s2()
1113 vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK; in arm_64_lpae_alloc_pgtable_s2()
1116 data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), in arm_64_lpae_alloc_pgtable_s2()
1117 GFP_KERNEL, cfg, cookie); in arm_64_lpae_alloc_pgtable_s2()
1118 if (!data->pgd) in arm_64_lpae_alloc_pgtable_s2()
1125 cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd); in arm_64_lpae_alloc_pgtable_s2()
1126 return &data->iop; in arm_64_lpae_alloc_pgtable_s2()
1134 arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) in arm_32_lpae_alloc_pgtable_s1()
1136 if (cfg->ias > 32 || cfg->oas > 40) in arm_32_lpae_alloc_pgtable_s1()
1139 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); in arm_32_lpae_alloc_pgtable_s1()
1140 return arm_64_lpae_alloc_pgtable_s1(cfg, cookie); in arm_32_lpae_alloc_pgtable_s1()
1144 arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) in arm_32_lpae_alloc_pgtable_s2()
1146 if (cfg->ias > 40 || cfg->oas > 40) in arm_32_lpae_alloc_pgtable_s2()
1149 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); in arm_32_lpae_alloc_pgtable_s2()
1150 return arm_64_lpae_alloc_pgtable_s2(cfg, cookie); in arm_32_lpae_alloc_pgtable_s2()
1154 arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) in arm_mali_lpae_alloc_pgtable()
1159 if (cfg->quirks) in arm_mali_lpae_alloc_pgtable()
1162 if (cfg->ias > 48 || cfg->oas > 40) in arm_mali_lpae_alloc_pgtable()
1165 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); in arm_mali_lpae_alloc_pgtable()
1167 data = arm_lpae_alloc_pgtable(cfg); in arm_mali_lpae_alloc_pgtable()
1171 /* Mali seems to need a full 4-level table regardless of IAS */ in arm_mali_lpae_alloc_pgtable()
1172 if (data->start_level > 0) { in arm_mali_lpae_alloc_pgtable()
1173 data->start_level = 0; in arm_mali_lpae_alloc_pgtable()
1174 data->pgd_bits = 0; in arm_mali_lpae_alloc_pgtable()
1177 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the in arm_mali_lpae_alloc_pgtable()
1178 * best we can do is mimic the out-of-tree driver and hope that the in arm_mali_lpae_alloc_pgtable()
1179 * "implementation-defined caching policy" is good enough. Similarly, in arm_mali_lpae_alloc_pgtable()
1183 cfg->arm_mali_lpae_cfg.memattr = in arm_mali_lpae_alloc_pgtable()
1191 data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL, in arm_mali_lpae_alloc_pgtable()
1192 cfg, cookie); in arm_mali_lpae_alloc_pgtable()
1193 if (!data->pgd) in arm_mali_lpae_alloc_pgtable()
1199 cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) | in arm_mali_lpae_alloc_pgtable()
1202 if (cfg->coherent_walk) in arm_mali_lpae_alloc_pgtable()
1203 cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER; in arm_mali_lpae_alloc_pgtable()
1205 return &data->iop; in arm_mali_lpae_alloc_pgtable()
1255 WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); in dummy_tlb_flush()
1274 struct io_pgtable_cfg *cfg = &data->iop.cfg; in arm_lpae_dump_ops()
1276 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", in arm_lpae_dump_ops()
1277 cfg->pgsize_bitmap, cfg->ias); in arm_lpae_dump_ops()
1279 ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data), in arm_lpae_dump_ops()
1280 ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd); in arm_lpae_dump_ops()
1287 -EFAULT; \
1290 static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) in arm_lpae_run_tests()
1305 cfg_cookie = cfg; in arm_lpae_run_tests()
1306 ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); in arm_lpae_run_tests()
1309 return -ENOMEM; in arm_lpae_run_tests()
1316 if (ops->iova_to_phys(ops, 42)) in arm_lpae_run_tests()
1319 if (ops->iova_to_phys(ops, SZ_1G + 42)) in arm_lpae_run_tests()
1322 if (ops->iova_to_phys(ops, SZ_2G + 42)) in arm_lpae_run_tests()
1329 for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { in arm_lpae_run_tests()
1332 if (ops->map_pages(ops, iova, iova, size, 1, in arm_lpae_run_tests()
1339 if (!ops->map_pages(ops, iova, iova + size, size, 1, in arm_lpae_run_tests()
1344 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) in arm_lpae_run_tests()
1351 size = 1UL << __ffs(cfg->pgsize_bitmap); in arm_lpae_run_tests()
1352 if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size) in arm_lpae_run_tests()
1356 if (ops->map_pages(ops, SZ_1G + size, size, size, 1, in arm_lpae_run_tests()
1360 if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) in arm_lpae_run_tests()
1365 for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { in arm_lpae_run_tests()
1368 if (ops->unmap_pages(ops, iova, size, 1, NULL) != size) in arm_lpae_run_tests()
1371 if (ops->iova_to_phys(ops, iova + 42)) in arm_lpae_run_tests()
1375 if (ops->map_pages(ops, iova, iova, size, 1, in arm_lpae_run_tests()
1379 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) in arm_lpae_run_tests()
1406 struct io_pgtable_cfg cfg = { in arm_lpae_do_selftests()
1418 cfg.pgsize_bitmap = pgsize[i]; in arm_lpae_do_selftests()
1419 cfg.ias = ias[j]; in arm_lpae_do_selftests()
1422 if (arm_lpae_run_tests(&cfg)) in arm_lpae_do_selftests()
1430 return fail ? -EFAULT : 0; in arm_lpae_do_selftests()
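For reference, the public entry point the selftest exercises: a driver fills in a struct io_pgtable_cfg and calls alloc_io_pgtable_ops(), then maps through the returned ops. A hedged kernel-context sketch; my_flush_ops is a placeholder for caller-provided struct iommu_flush_ops:

#include <linux/io-pgtable.h>
#include <linux/iommu.h>

extern const struct iommu_flush_ops my_flush_ops;	/* placeholder */

static int example_map_one(struct device *dev, unsigned long iova,
			   phys_addr_t paddr, void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.coherent_walk	= true,
		.tlb		= &my_flush_ops,
		.iommu_dev	= dev,
	};
	struct io_pgtable_ops *ops;
	size_t mapped = 0;
	int ret;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
	if (!ops)
		return -ENOMEM;

	ret = ops->map_pages(ops, iova, paddr, SZ_4K, 1,
			     IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
	if (ret)
		goto out;

	WARN_ON(ops->iova_to_phys(ops, iova) != paddr);
out:
	free_io_pgtable_ops(ops);	/* tears down the whole table */
	return ret;
}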