Excerpts from drivers/iommu/io-pgtable-arm.c (lines matching "data" and "size"; elided code is marked "...")

// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/io-pgtable.h>
#include <linux/dma-mapping.h>

#include "io-pgtable-arm.h"
#include "iommu-pages.h"
/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
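/*
 * Illustrative userspace sketch (not part of the driver): the same
 * shift/index arithmetic for a 4KB granule (bits_per_level = 9, 8-byte
 * PTEs, four levels). The constants here are assumptions for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_LEVELS	4
#define BITS_PER_LEVEL	9	/* 4K granule / 8-byte PTEs = 512 entries */
#define PTE_SHIFT	3	/* ilog2(sizeof(uint64_t)) */

static unsigned int lvl_shift(int l)
{
	return (MAX_LEVELS - l) * BITS_PER_LEVEL + PTE_SHIFT;
}

int main(void)
{
	uint64_t iova = 0x0000004512345000ULL;

	for (int l = 0; l < MAX_LEVELS; l++) {
		unsigned int shift = lvl_shift(l);
		uint64_t idx = (iova >> shift) & ((1 << BITS_PER_LEVEL) - 1);

		/* Prints shift 39/30/21/12 and block 512G/1G/2M/4K */
		printf("level %d: shift %2u, index %3llu, block 0x%llx\n",
		       l, shift, (unsigned long long)idx, 1ULL << shift);
	}
	return 0;
}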
/* Stage-1 PTE */
	/* ... */

/* Stage-2 PTE */
	/* ... */

/*
 * Stage-2 MemAttr encodings:
 *  ...
 *  0101 = Normal Non-cachable / Inner Non-cachable
 *  0001 = Device / Device-nGnRE
 * With S2FWB:
 *  ...
 *  0101 Normal* is forced Normal-NC, Device unchanged
 *  0001 Force Device-nGnRE
 */
/* iopte_leaf() */
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;
	/* ... */

/* iopte_table() */
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
		return false;
	/* ... */
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
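/*
 * Illustrative userspace sketch (not part of the driver): with the 64KB
 * granule, PA bits 51:48 are folded down into PTE bits 15:12 and rotated
 * back on the way out. The mask below mirrors ARM_LPAE_PTE_ADDR_MASK
 * (bits 47:12); spelling it out here is an assumption of the demo.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_MASK	0x0000fffffffff000ULL	/* PTE address field, bits 47:12 */

static uint64_t pack(uint64_t paddr)
{
	/* Fold PA bits 51:48 down into PTE bits 15:12 */
	return (paddr | (paddr >> (48 - 12))) & ADDR_MASK;
}

static uint64_t unpack(uint64_t pte)
{
	uint64_t paddr = pte & ADDR_MASK;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ADDR_MASK << 4);
}

int main(void)
{
	uint64_t paddr = 0xa0000dead0000ULL;	/* a 52-bit, 64K-aligned PA */

	assert(unpack(pack(paddr)) == paddr);
	printf("pa 0x%llx <-> pte addr 0x%llx\n",
	       (unsigned long long)paddr, (unsigned long long)pack(paddr));
	return 0;
}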
static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
{
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);

	/* How many entries remain from index i to the end of this table */
	return ptes_per_table - (i & (ptes_per_table - 1));
}
/*
 * Concatenated PGDs are mandatory according to:
 * 1) R_DXBSH: For 16KB, and 48-bit input size, use level 1 instead of 0.
 * 2) R_SRKBC: After de-ciphering the table for PA size and valid initial lookup
 *    a) 40 bits PA size with 4K: use level 1 instead of level 0 (2 tables for ias = oas)
 *    b) 40 bits PA size with 16K: use level 2 instead of level 1 (16 tables for ias = oas)
 *    c) 42 bits PA size with 4K: use level 1 instead of level 0 (8 tables for ias = oas)
 *    d) 48 bits PA size with 16K: use level 1 instead of level 0 (2 tables for ias = oas)
 */
static bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
				      struct arm_lpae_io_pgtable *data)
{
	unsigned int ias = cfg->ias;
	unsigned int oas = cfg->oas;

	/* Covers 1) and 2d) */
	if ((ARM_LPAE_GRANULE(data) == SZ_16K) && (data->start_level == 0))
		return (ias == 48) || (oas == 48);

	/* Covers 2a) and 2c) */
	if ((ARM_LPAE_GRANULE(data) == SZ_4K) && (data->start_level == 0))
		return (oas == 40) || (oas == 42);

	/* Covers 2b) */
	return (ARM_LPAE_GRANULE(data) == SZ_16K) &&
	       (data->start_level == 1) && (oas == 40);
}
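/*
 * Illustrative userspace sketch (not part of the driver): replay the
 * geometry from arm_lpae_alloc_pgtable() below and apply the concatenation
 * step (start_level++, pgd_bits += bits_per_level) to check the table
 * counts quoted in the comment above.
 */
#include <stdio.h>

static void concat_tables(unsigned int ias, unsigned int pg_shift)
{
	unsigned int bits_per_level = pg_shift - 3;	/* 8-byte PTEs */
	unsigned int va_bits = ias - pg_shift;
	unsigned int levels = (va_bits + bits_per_level - 1) / bits_per_level;
	unsigned int pgd_bits = va_bits - bits_per_level * (levels - 1);

	/* Concatenation folds the skipped level into a wider start table */
	pgd_bits += bits_per_level;

	printf("ias=oas %u, %2uK granule: %2u concatenated tables\n",
	       ias, 1 << (pg_shift - 10), 1 << (pgd_bits - bits_per_level));
}

int main(void)
{
	concat_tables(40, 12);	/* case a: 2 tables  */
	concat_tables(40, 14);	/* case b: 16 tables */
	concat_tables(42, 12);	/* case c: 8 tables  */
	concat_tables(48, 14);	/* case d: 2 tables  */
	return 0;
}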
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg, void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	size_t alloc_size;
	dma_addr_t dma;
	void *pages;

	/*
	 * For very small starting-level translation tables the HW requires a
	 * minimum alignment of at least 64 bytes, so overallocate if need be.
	 */
	alloc_size = max(size, 64);
	if (cfg->alloc)
		pages = cfg->alloc(cookie, alloc_size, gfp);
	/* ... else use the common iommu page allocator; NULL goes to out_free ... */

	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		/* ... mapping/translation sanity checks may goto out_unmap ... */
	}

	return pages;

out_unmap:
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	if (cfg->free)
		cfg->free(cookie, pages, size);
	/* ... */
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg, void *cookie)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);

	if (cfg->free)
		cfg->free(cookie, pages, size);
	/* ... */
}
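/*
 * Illustrative sketch (not part of the driver): the cfg->alloc/cfg->free
 * hooks above let a caller substitute its own table allocator. The hook
 * signatures match their use above; everything else here is hypothetical.
 * Tables must be zeroed, physically contiguous and naturally aligned.
 */
static void *my_pgtable_alloc(void *cookie, size_t size, gfp_t gfp)
{
	return (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
}

static void my_pgtable_free(void *cookie, void *pages, size_t size)
{
	free_pages((unsigned long)pages, get_order(size));
}

/* Wired up via: cfg.alloc = my_pgtable_alloc; cfg.free = my_pgtable_free; */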
/* __arm_lpae_sync_pte() */
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);

/* __arm_lpae_clear_pte(): zero the PTEs, then sync for non-coherent walkers */
	if (!cfg->coherent_walk && num_entries)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);
static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++) {
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/* ... dma_wmb() then cmpxchg64 publishes new; the prior value is old ... */

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* ... otherwise sync the PTE and mark it ARM_LPAE_PTE_SW_SYNC ... */
	return old;
}
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = arm_lpae_max_entries(map_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN));
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}
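/*
 * Illustrative userspace sketch (not part of the driver): a mapping is
 * installed at whichever level its page size equals the block size, as in
 * the "size == block_size" test above. Constants assume a 4KB granule.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_LEVELS	4
#define BITS_PER_LEVEL	9

static unsigned int lvl_shift(int l)
{
	return (MAX_LEVELS - l) * BITS_PER_LEVEL + 3;
}

/* Return the level whose block size matches the requested page size. */
static int level_for_size(uint64_t size)
{
	for (int l = 0; l < MAX_LEVELS; l++)
		if (size == (1ULL << lvl_shift(l)))
			return l;
	return -1;	/* not a supported block/page size */
}

int main(void)
{
	printf("4K -> level %d\n", level_for_size(1ULL << 12));	/* 3 */
	printf("2M -> level %d\n", level_for_size(1ULL << 21));	/* 2 */
	printf("1G -> level %d\n", level_for_size(1ULL << 30));	/* 1 */
	return 0;
}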
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
			pte |= ARM_LPAE_PTE_DBM;
		/* ... unprivileged access and attribute index selection ... */
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		/* ... stage-2 HAP read/write permissions ... */
		if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_S2FWB) {
			/* ... FWB MemAttr encodings (see the table above) ... */
		}
		/* ... */
	}

	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}
static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return -EINVAL;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
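/*
 * Illustrative sketch (not part of the driver): a hypothetical driver-side
 * caller of the io-pgtable API, mirroring the selftest at the bottom of
 * this file. Function and parameter names here are invented; error
 * handling is trimmed.
 */
static int example_map_one(struct device *dev,
			   const struct iommu_flush_ops *tlb_ops)
{
	size_t mapped = 0;
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.coherent_walk	= true,
		.tlb		= tlb_ops,
		.iommu_dev	= dev,
	};
	struct io_pgtable_ops *ops;
	int ret;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, NULL);
	if (!ops)
		return -ENOMEM;

	/* Map one 2MB block at IOVA 0x40000000 -> PA 0x80000000, RW */
	ret = ops->map_pages(ops, 0x40000000, 0x80000000, SZ_2M, 1,
			     IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
	if (!ret)
		WARN_ON(ops->unmap_pages(ops, 0x40000000, SZ_2M, 1, NULL) != SZ_2M);

	free_io_pgtable_ops(ops);
	return ret;
}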
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something is horribly wrong if we're here at the last level */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (!pte) {
		WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
		return -ENOENT;
	}

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);

		/* Find and handle non-leaf entries */
		for (i = 0; i < num_entries; i++) {
			pte = READ_ONCE(ptep[i]);
			if (!pte) {
				WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
				break;
			}

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);

				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			}
		}

		/* Clear the remaining entries */
		__arm_lpae_clear_pte(ptep, &iop->cfg, i);

		if (gather && !iommu_iotlb_gather_queued(gather))
			for (int j = 0; j < i; j++)
				io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/* ... unmapping part of a block is not supported ... */
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}
static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}
struct io_pgtable_walk_data {
	struct io_pgtable		*iop;
	void				*data;
	int (*visit)(struct io_pgtable_walk_data *walk_data, int lvl,
		     arm_lpae_iopte *ptep, size_t size);
	unsigned long			flags;
	u64				addr;
	const u64			end;
};

static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
				 struct io_pgtable_walk_data *walk_data,
				 arm_lpae_iopte *ptep,
				 int lvl);

struct iova_to_phys_data {
	arm_lpae_iopte pte;
	int lvl;
};

static int visit_iova_to_phys(struct io_pgtable_walk_data *walk_data, int lvl,
			      arm_lpae_iopte *ptep, size_t size)
{
	struct iova_to_phys_data *data = walk_data->data;

	data->pte = *ptep;
	data->lvl = lvl;
	return 0;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct iova_to_phys_data d;
	struct io_pgtable_walk_data walk_data = {
		.data = &d,
		.visit = visit_iova_to_phys,
		.addr = iova,
		.end = iova + 1,
	};
	int ret;

	ret = __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
	if (ret)
		return 0;

	iova &= (ARM_LPAE_BLOCK_SIZE(d.lvl, data) - 1);
	return iopte_to_paddr(d.pte, data) | iova;
}
static int visit_pgtable_walk(struct io_pgtable_walk_data *walk_data, int lvl,
			      arm_lpae_iopte *ptep, size_t size)
{
	struct arm_lpae_io_pgtable_walk_data *data = walk_data->data;

	data->ptes[lvl] = *ptep;
	return 0;
}

static int arm_lpae_pgtable_walk(struct io_pgtable_ops *ops, unsigned long iova,
				 void *wd)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_walk_data walk_data = {
		.data = wd,
		.visit = visit_pgtable_walk,
		.addr = iova,
		.end = iova + 1,
	};

	return __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
}
static int io_pgtable_visit(struct arm_lpae_io_pgtable *data,
			    struct io_pgtable_walk_data *walk_data,
			    arm_lpae_iopte *ptep, int lvl)
{
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte pte = READ_ONCE(*ptep);

	size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ret = walk_data->visit(walk_data, lvl, ptep, size);
	if (ret)
		return ret;

	if (iopte_leaf(pte, lvl, iop->fmt)) {
		walk_data->addr += size;
		return 0;
	}

	if (!iopte_table(pte, lvl))
		return -EINVAL;

	ptep = iopte_deref(pte, data);
	return __arm_lpae_iopte_walk(data, walk_data, ptep, lvl + 1);
}

static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
				 struct io_pgtable_walk_data *walk_data,
				 arm_lpae_iopte *ptep,
				 int lvl)
{
	u32 idx;
	int max_entries, ret;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return -EINVAL;

	if (lvl == data->start_level)
		max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
	else
		max_entries = ARM_LPAE_PTES_PER_TABLE(data);

	for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
	     (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
		ret = io_pgtable_visit(data, walk_data, ptep + idx, lvl);
		if (ret)
			return ret;
	}

	return 0;
}
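/*
 * Illustrative sketch (not part of the driver): the visit callback makes
 * the walker reusable. A hypothetical visitor that counts leaf entries
 * over [addr, end), following the same pattern as visit_dirty() below:
 */
struct leaf_count_data {
	unsigned long leaves;
};

static int visit_count_leaves(struct io_pgtable_walk_data *walk_data, int lvl,
			      arm_lpae_iopte *ptep, size_t size)
{
	struct leaf_count_data *count = walk_data->data;

	if (iopte_leaf(*ptep, lvl, walk_data->iop->fmt))
		count->leaves++;

	return 0;
}
/* Run with .visit = visit_count_leaves, .addr = iova, .end = iova + size. */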
static int visit_dirty(struct io_pgtable_walk_data *walk_data, int lvl,
		       arm_lpae_iopte *ptep, size_t size)
{
	struct iommu_dirty_bitmap *dirty = walk_data->data;

	if (!iopte_leaf(*ptep, lvl, walk_data->iop->fmt))
		return 0;

	if (iopte_writeable_dirty(*ptep)) {
		iommu_dirty_bitmap_record(dirty, walk_data->addr, size);
		if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
			iopte_set_writeable_clean(ptep);
	}

	return 0;
}

static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
					 unsigned long iova, size_t size,
					 unsigned long flags,
					 struct iommu_dirty_bitmap *dirty)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct io_pgtable_walk_data walk_data = {
		.iop = &data->iop,
		.data = dirty,
		.visit = visit_dirty,
		.flags = flags,
		.addr = iova,
		.end = iova + size,
	};
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = data->start_level;

	if (WARN_ON(!size))
		return -EINVAL;
	if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
		return -EINVAL;
	if (data->iop.fmt != ARM_64_LPAE_S1)
		return -EINVAL;

	return __arm_lpae_iopte_walk(data, &walk_data, ptep, lvl);
}
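/*
 * Illustrative sketch (not part of the driver): a hypothetical caller of
 * the dirty-tracking op, assuming the iommu_dirty_bitmap helpers from
 * <linux/iommu.h>.
 */
static int example_sync_dirty(struct io_pgtable_ops *ops,
			      struct iova_bitmap *bitmap,
			      unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;
	struct iommu_dirty_bitmap dirty;

	iommu_dirty_bitmap_init(&dirty, bitmap, &gather);

	/* Record dirty IOVAs and clear the PTE dirty state as we go */
	return ops->read_and_clear_dirty(ops, iova, size, 0, &dirty);
}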
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		if (cfg->oas > 48)
			max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		/* ... plus the 4TB block size when the OAS allows ... */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
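/*
 * Illustrative userspace sketch (not part of the driver): the __fls/__ffs
 * dance above picks the largest supported size below the CPU page size,
 * else the smallest size above it. PAGE_SIZE is assumed 4K here, and
 * __fls/__ffs are rendered with GCC builtins.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static unsigned long pick_granule(unsigned long pgsize_bitmap)
{
	if (pgsize_bitmap & PAGE_SIZE)			/* exact match */
		return PAGE_SIZE;
	if (pgsize_bitmap & ~PAGE_MASK)			/* sizes below PAGE_SIZE */
		return 1UL << (63 - __builtin_clzl(pgsize_bitmap & ~PAGE_MASK));
	if (pgsize_bitmap & PAGE_MASK)			/* sizes above PAGE_SIZE */
		return 1UL << __builtin_ctzl(pgsize_bitmap & PAGE_MASK);
	return 0;
}

int main(void)
{
	printf("0x%lx\n", pick_granule(0x40201000));	/* 4K|2M|1G -> 4K  */
	printf("0x%lx\n", pick_granule(0x20010000));	/* 64K|512M -> 64K */
	return 0;
}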
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages		= arm_lpae_map_pages,
		.unmap_pages		= arm_lpae_unmap_pages,
		.iova_to_phys		= arm_lpae_iova_to_phys,
		.read_and_clear_dirty	= arm_lpae_read_and_clear_dirty,
		.pgtable_walk		= arm_lpae_pgtable_walk,
	};

	return data;
}
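/*
 * Illustrative userspace sketch (not part of the driver): replay the
 * geometry arithmetic above for a few common configurations.
 */
#include <stdio.h>

#define MAX_LEVELS 4

static void geometry(unsigned int ias, unsigned int pg_shift)
{
	unsigned int bits_per_level = pg_shift - 3;	/* 8-byte PTEs */
	unsigned int va_bits = ias - pg_shift;
	unsigned int levels = (va_bits + bits_per_level - 1) / bits_per_level;
	unsigned int pgd_bits = va_bits - bits_per_level * (levels - 1);

	printf("ias %u, granule %2uK: start level %u, pgd %llu bytes\n",
	       ias, 1 << (pg_shift - 10), MAX_LEVELS - levels,
	       (unsigned long long)sizeof(unsigned long long) << pgd_bits);
}

int main(void)
{
	geometry(48, 12);	/* 4 levels, 4KB pgd */
	geometry(48, 16);	/* 3 levels, 512B pgd */
	geometry(39, 12);	/* 3 levels, 4KB pgd */
	geometry(32, 12);	/* 3 levels, 32B pgd: hence the 64-byte minimum above */
	return 0;
}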
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
			    IO_PGTABLE_QUIRK_ARM_HD |
			    IO_PGTABLE_QUIRK_NO_WARN))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	/* ... compose the memory attribute indirection value in reg ... */
	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
			    IO_PGTABLE_QUIRK_NO_WARN))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	if (arm_lpae_concat_mandatory(cfg, data)) {
		if (WARN_ON((ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte)) >
			    ARM_LPAE_S2_MAX_CONCAT_PAGES))
			return NULL;
		data->pgd_bits += data->bits_per_level;
		data->start_level++;
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
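/*
 * Illustrative userspace sketch (not part of the driver): the SL0 field
 * encodes the start level inverted, with the 4K granule bumping sl by one
 * first, as above. Assuming a 2-bit SL0 field:
 */
#include <stdio.h>

#define VTCR_SL0_MASK	0x3	/* assumed 2-bit field */

static unsigned int sl0(unsigned int start_level, int is_4k)
{
	unsigned int sl = start_level + (is_4k ? 1 : 0);

	return ~sl & VTCR_SL0_MASK;
}

int main(void)
{
	printf("4K  start level 0 -> SL0 %u\n", sl0(0, 1));	/* 2 */
	printf("4K  start level 1 -> SL0 %u\n", sl0(1, 1));	/* 1 */
	printf("16K start level 1 -> SL0 %u\n", sl0(1, 0));	/* 2 */
	return 0;
}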
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}
static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
	       cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
	       ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
	       ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}
#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks: empty page tables shouldn't
		 * provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/* Distinct mappings of different granule sizes */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings must be rejected */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Full unmap, then remap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Map/unmap the largest supported size at the top of the IAS */
		size = 1UL << __fls(cfg->pgsize_bitmap);
		iova = (1UL << cfg->ias) - size;
		if (ops->map_pages(ops, iova, iova, size, 1,
				   IOMMU_READ | IOMMU_WRITE |
				   IOMMU_NOEXEC | IOMMU_CACHE,
				   GFP_KERNEL, &mapped))
			return __FAIL(ops, i);
		if (mapped != size)
			return __FAIL(ops, i);
		if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
			return __FAIL(ops, i);

		free_io_pgtable_ops(ops);
	}

	return 0;
}
static int __init arm_lpae_do_selftests(void)
{
	/* ... tables of candidate page-size bitmaps and address sizes ... */
	int i, j, k, pass = 0, fail = 0;
	struct faux_device *dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.coherent_walk = true,
	};

	dev = faux_device_create("io-pgtable-test", NULL, 0);
	if (!dev)
		return -ENOMEM;

	cfg.iommu_dev = &dev->dev;

	/* ... for each page-size bitmap and each (ias, oas) pair ... */
	/* Don't use ias > oas as it is not valid for stage-2. */
	/* ... run arm_lpae_run_tests(&cfg), counting pass/fail ... */

	faux_device_destroy(dev);

	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);