// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
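
/*
 * Worked example: with a 4K granule and 8-byte PTEs, bits_per_level is
 * 12 - ilog2(8) = 9, so ARM_LPAE_LVL_SHIFT() yields 12, 21 and 30 for
 * levels 3, 2 and 1, and ARM_LPAE_BLOCK_SIZE() below gives the familiar
 * 4K, 2M and 1G mapping sizes.
 */
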
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}
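
/*
 * With the 64K granule and 52-bit output addresses (ARMv8.2 LPA), PA bits
 * 51:48 are carried in PTE bits 15:12, hence the unconditional rotation by
 * 48 - 12 = 36 bits in the accessors below.
 */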
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg,
				    void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));

	if (cfg->alloc) {
		pages = cfg->alloc(cookie, size, gfp);
	} else {
		struct page *p;

		p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
		pages = p ? page_address(p) : NULL;
	}

	if (!pages)
		return NULL;

	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);

out_free:
	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		free_pages((unsigned long)pages, order);

	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg,
				  void *cookie)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);

	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);
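
/*
 * Write out a run of leaf PTEs. All of the entries are assumed to lie
 * within a single table, so one DMA sync at the end is enough to publish
 * them to a non-coherent walker.
 */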
static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
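
/*
 * Recursively descend to the level whose block size matches 'size', then
 * install up to 'pgcount' leaf entries (bounded by the end of the current
 * table), reporting progress via *mapped so the caller can retry the
 * remainder.
 */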
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}
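
/*
 * Translate IOMMU_* prot flags into PTE attribute bits: AP[2:1] permissions
 * and a MAIR index for stage 1, HAP permissions and direct MemAttr
 * encodings for stage 2.
 */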
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}
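
/*
 * Split a block mapping so that part of it can be unmapped: build a
 * next-level table covering the block minus the region being removed, then
 * swap it in with arm_lpae_install_table(). Losing the cmpxchg there means
 * somebody else split this block first, in which case we retry the unmap
 * against their table.
 */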
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}
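
/*
 * Software walk of the tables, used to back iommu_iova_to_phys(). Returns
 * 0 when no valid translation exists for the given IOVA.
 */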
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
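
/*
 * Worked example for the geometry above: 4K pages with a 48-bit IAS give
 * pg_shift = 12, bits_per_level = 9 and va_bits = 36, hence levels = 4,
 * start_level = 0 and pgd_bits = 9 (a single 4K top-level table).
 */
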
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
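
/*
 * Stage 2 can concatenate up to ARM_LPAE_S2_MAX_CONCAT_PAGES (16) tables
 * at the initial level, trading a wider pgd for one fewer level of walk.
 */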
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}
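
/*
 * Mali GPUs use an MMU with LPAE-format descriptors but their own
 * TRANSTAB/MEMATTR registers, combining stage-1-like attributes with
 * stage-2-like permissions (see arm_lpae_prot_to_pte()).
 */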
static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};
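
/*
 * The dummy TLB ops above let the selftests exercise the map/unmap paths
 * without hardware: they only check that the cookie matches and that the
 * flushed size is one of the configured page sizes.
 */
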
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
				   IOMMU_READ, GFP_KERNEL, &mapped))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
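
/*
 * Run the suite above for every combination of page-size set and IAS
 * below, using the dummy TLB ops, a coherent walk and a 48-bit OAS.
 */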
static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct device dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
		.iommu_dev = &dev,
	};

	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
	set_dev_node(&dev, NUMA_NO_NODE);

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif