// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"
#include "iommu-pages.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
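
/*
 * Worked example (illustrative): with a 4K granule, bits_per_level = 9 and
 * sizeof(arm_lpae_iopte) = 8, so ARM_LPAE_LVL_SHIFT() yields 39/30/21/12
 * for levels 0-3, and ARM_LPAE_LVL_IDX() extracts the corresponding 9-bit
 * index field from the IOVA (widened at the start level by any extra bits
 * the pgd absorbs). 16K and 64K granules shift accordingly.
 */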

/*
 * Calculate the block/page mapping size at level l for pagetable in d.
 */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}
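
/*
 * The PTE address field only covers bits 47:12. With a 64K granule and a
 * 52-bit output address, PA bits 51:48 are folded down into PTE bits 15:12
 * (which are RES0 for a 64K granule) and rotated back up on the way out;
 * e.g. (illustrative) PA bit 48 moves down by 48 - 12 = 36 places to PTE
 * bit 12.
 */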

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg,
				    void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));

	if (cfg->alloc)
		pages = cfg->alloc(cookie, size, gfp);
	else
		pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);

	if (!pages)
		return NULL;

	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);

out_free:
	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		iommu_free_pages(pages, order);

	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg,
				  void *cookie)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);

	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		iommu_free_pages(pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}
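
/*
 * Install num_entries leaf PTEs. Overwriting an existing leaf is refused
 * with -EEXIST (the caller must unmap first); an existing next-level table
 * is unmapped and freed before being replaced by a block entry.
 */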
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
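
/*
 * Descend the table, allocating next-level tables as required, until the
 * requested size equals the block size at the current level, then install
 * up to pgcount leaves without running off the end of the table. If a
 * concurrent walker beats us to arm_lpae_install_table(), our freshly
 * allocated table is freed and the winner's entry is used instead.
 */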
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
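
/*
 * Illustrative usage from a hypothetical IOMMU driver, assuming "cfg" and
 * "cookie" have been set up elsewhere:
 *
 *	struct io_pgtable_ops *ops;
 *	size_t mapped = 0;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops)
 *		ops->map_pages(ops, iova, paddr, SZ_4K, 16,
 *			       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL,
 *			       &mapped);
 */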
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}
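
/*
 * Unmap part of a block mapping by replacing the block PTE with a table of
 * next-level entries covering the same range, minus the portion being
 * unmapped. The table is installed with a cmpxchg, so a racing unmap of
 * another part of the same block is tolerated.
 */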
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}
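
/*
 * Walk until a leaf entry is found, then merge the leaf's output address
 * with the low IOVA bits. E.g. (illustrative, 4K granule) a level-2 block
 * covers 2M, so the low 21 bits of the IOVA pass through unchanged.
 */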
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
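
/*
 * Worked example (illustrative): ias = 48 with a 4K granule gives
 * pg_shift = 12, bits_per_level = 9, va_bits = 36, levels = 4,
 * start_level = 0 and pgd_bits = 36 - 3 * 9 = 9, i.e. a single 4K pgd.
 */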
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
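
/*
 * Illustrative example of the level-1 concatenation below: with a 4K
 * granule and ias = 40, the walk would otherwise need a 2-entry level-0
 * table; since 2 <= ARM_LPAE_S2_MAX_CONCAT_PAGES, we start at level 1
 * instead, with a two-page concatenated pgd of 1024 entries.
 */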
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}
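
/*
 * Mali GPUs use an LPAE-like format with stage-1-style attributes but
 * stage-2-style permissions (see arm_lpae_prot_to_pte()); no quirks are
 * accepted, and a full four-level table is used regardless of IAS.
 */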
static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}
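
/* Report a selftest failure, dump the offending table's geometry and bail */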
#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
				   IOMMU_READ, GFP_KERNEL, &mapped))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
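
/*
 * Run the selftests over each supported granule (4K/16K/64K) and a range
 * of input address sizes; any failure makes the initcall return -EFAULT.
 */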
static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct device dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
		.iommu_dev = &dev,
	};

	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
	set_dev_node(&dev, NUMA_NO_NODE);

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif