// SPDX-License-Identifier: GPL-2.0-only
/*
 * Apple DART page table allocator.
 *
 * Copyright (C) 2022 The Asahi Linux Contributors
 *
 * Based on io-pgtable-arm.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"dart io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include "iommu-pages.h"

#define DART1_MAX_ADDR_BITS	36

/*
 * A DART walks two levels (L1 -> L2) below up to four root tables, one
 * per hardware TTBR.
 */
#define DART_MAX_TABLES		4
#define DART_LEVELS		2

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct dart_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/* Size in bytes of one table: one PTE per slot, 1 << bits_per_level slots. */
#define DART_GRANULE(d)						\
	(sizeof(dart_iopte) << (d)->bits_per_level)
#define DART_PTES_PER_TABLE(d)					\
	(DART_GRANULE(d) >> ilog2(sizeof(dart_iopte)))

/* Subpage protection: first/last accessible subpage within the page. */
#define APPLE_DART_PTE_SUBPAGE_START   GENMASK_ULL(63, 52)
#define APPLE_DART_PTE_SUBPAGE_END     GENMASK_ULL(51, 40)

#define APPLE_DART1_PADDR_MASK	GENMASK_ULL(35, 12)
#define APPLE_DART2_PADDR_MASK	GENMASK_ULL(37, 10)
#define APPLE_DART2_PADDR_SHIFT	(4)

/* Apple DART1 protection bits */
#define APPLE_DART1_PTE_PROT_NO_READ	BIT(8)
#define APPLE_DART1_PTE_PROT_NO_WRITE	BIT(7)
#define APPLE_DART1_PTE_PROT_SP_DIS	BIT(1)

/* Apple DART2 protection bits */
#define APPLE_DART2_PTE_PROT_NO_READ	BIT(3)
#define APPLE_DART2_PTE_PROT_NO_WRITE	BIT(2)
#define APPLE_DART2_PTE_PROT_NO_CACHE	BIT(1)

/* marks PTE as valid */
#define APPLE_DART_PTE_VALID		BIT(0)

/* IOPTE accessors */
#define iopte_deref(pte, d) __va(iopte_to_paddr(pte, d))

struct dart_io_pgtable {
	struct io_pgtable	iop;

	/* log2 of the number of root tables actually in use (<= 2) */
	int			tbl_bits;
	/* log2 of the number of PTEs per table */
	int			bits_per_level;

	/* root (L1) tables, one per hardware TTBR */
	void			*pgd[DART_MAX_TABLES];
};

typedef u64 dart_iopte;


/*
 * Encode a physical address into the PTE address field for the
 * configured DART format (DART1: masked in place; DART2: shifted
 * right by 4 first).
 */
static dart_iopte paddr_to_iopte(phys_addr_t paddr,
				 struct dart_io_pgtable *data)
{
	dart_iopte pte;

	if (data->iop.fmt == APPLE_DART)
		return paddr & APPLE_DART1_PADDR_MASK;

	/* format is APPLE_DART2 */
	pte = paddr >> APPLE_DART2_PADDR_SHIFT;
	pte &= APPLE_DART2_PADDR_MASK;

	return pte;
}

/* Inverse of paddr_to_iopte(): recover the physical address from a PTE. */
static phys_addr_t iopte_to_paddr(dart_iopte pte,
				  struct dart_io_pgtable *data)
{
	u64 paddr;

	if (data->iop.fmt == APPLE_DART)
		return pte & APPLE_DART1_PADDR_MASK;

	/* format is APPLE_DART2 */
	paddr = pte & APPLE_DART2_PADDR_MASK;
	paddr <<= APPLE_DART2_PADDR_SHIFT;

	return paddr;
}

/*
 * Allocate zero-initialised, lowmem pages for a table.  Highmem is
 * forbidden since tables are dereferenced via __va() (iopte_deref()).
 */
static void *__dart_alloc_pages(size_t size, gfp_t gfp)
{
	int order = get_order(size);

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	return iommu_alloc_pages(gfp, order);
}

/*
 * Fill @num_entries consecutive leaf PTEs starting at @ptep, mapping a
 * physically contiguous range beginning at @paddr.  Fails with -EEXIST
 * (after a WARN) if any target PTE is already valid: callers must unmap
 * first.
 */
static int dart_init_pte(struct dart_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     dart_iopte prot, int num_entries,
			     dart_iopte *ptep)
{
	int i;
	dart_iopte pte = prot;
	/* only a single page size is supported, so this is the page size */
	size_t sz = data->iop.cfg.pgsize_bitmap;

	for (i = 0; i < num_entries; i++)
		if (ptep[i] & APPLE_DART_PTE_VALID) {
			/* We require an unmap first */
			WARN_ON(ptep[i] & APPLE_DART_PTE_VALID);
			return -EEXIST;
		}

	/* subpage protection: always allow access to the entire page */
	pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_START, 0);
	pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_END, 0xfff);

	pte |= APPLE_DART1_PTE_PROT_SP_DIS;
	pte |= APPLE_DART_PTE_VALID;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	return 0;
}

/*
 * Atomically install @table as the next-level table behind @ptep,
 * expecting *ptep to currently hold @curr.  Returns the previous value
 * of *ptep: zero means we won the install race, non-zero means another
 * thread installed a table first and the caller must free @table.
 */
static dart_iopte dart_install_table(dart_iopte *table,
					 dart_iopte *ptep,
					 dart_iopte curr,
					 struct dart_io_pgtable *data)
{
	dart_iopte old, new;

	new = paddr_to_iopte(__pa(table), data) | APPLE_DART_PTE_VALID;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	return old;
}

/*
 * Root-table (TTBR) index for @iova: the bits above both translation
 * levels.  The page shift is bits_per_level + ilog2(sizeof(dart_iopte)),
 * hence the 3x factor here.
 */
static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova)
{
	return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
		((1 << data->tbl_bits) - 1);
}

/* L1 index for @iova: the bits_per_level bits above the L2 index. */
static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova)
{

	return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
		 ((1 << data->bits_per_level) - 1);
}

/* L2 index for @iova: the bits_per_level bits above the page offset. */
static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova)
{

	return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
		 ((1 << data->bits_per_level) - 1);
}

/*
 * Walk down to the L2 table covering @iova.  Returns a pointer to the
 * start of that table, or NULL if the root table is absent or the L1
 * entry is empty.
 */
static  dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova)
{
	dart_iopte pte, *ptep;
	int tbl = dart_get_table(data, iova);

	ptep = data->pgd[tbl];
	if (!ptep)
		return NULL;

	ptep += dart_get_l1_index(data, iova);
	pte = READ_ONCE(*ptep);

	/* Valid entry? */
	if (!pte)
		return NULL;

	/* Deref to get level 2 table */
	return iopte_deref(pte, data);
}

/*
 * Translate IOMMU_{READ,WRITE,CACHE} flags into hardware PTE protection
 * bits.  Note the polarity is inverted: the hardware bits *deny* access.
 */
static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
				   int prot)
{
	dart_iopte pte = 0;

	if (data->iop.fmt == APPLE_DART) {
		if (!(prot & IOMMU_WRITE))
			pte |= APPLE_DART1_PTE_PROT_NO_WRITE;
		if (!(prot & IOMMU_READ))
			pte |= APPLE_DART1_PTE_PROT_NO_READ;
	}
	if (data->iop.fmt == APPLE_DART2) {
		if (!(prot & IOMMU_WRITE))
			pte |= APPLE_DART2_PTE_PROT_NO_WRITE;
		if (!(prot & IOMMU_READ))
			pte |= APPLE_DART2_PTE_PROT_NO_READ;
		if (!(prot & IOMMU_CACHE))
			pte |= APPLE_DART2_PTE_PROT_NO_CACHE;
	}

	return pte;
}

/*
 * Map up to @pgcount pages of size @pgsize at @iova -> @paddr.  Only the
 * single supported page size is accepted, and mapping stops at the end
 * of the containing L2 table; the number of bytes actually mapped is
 * accumulated into *@mapped so the core can call again for the rest.
 * Allocates the L2 table on demand, racing via dart_install_table().
 */
static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t tblsz = DART_GRANULE(data);
	int ret = 0, tbl, num_entries, max_entries, map_idx_start;
	dart_iopte pte, *cptep, *ptep;
	dart_iopte prot;

	if (WARN_ON(pgsize != cfg->pgsize_bitmap))
		return -EINVAL;

	if (WARN_ON(paddr >> cfg->oas))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	tbl = dart_get_table(data, iova);

	ptep = data->pgd[tbl];
	ptep += dart_get_l1_index(data, iova);
	pte = READ_ONCE(*ptep);

	/* no L2 table present */
	if (!pte) {
		cptep = __dart_alloc_pages(tblsz, gfp);
		if (!cptep)
			return -ENOMEM;

		pte = dart_install_table(cptep, ptep, 0, data);
		/* Lost the race: free our table, someone else's is in place */
		if (pte)
			iommu_free_pages(cptep, get_order(tblsz));

		/* L2 table is present (now) */
		pte = READ_ONCE(*ptep);
	}

	ptep = iopte_deref(pte, data);

	/* install leaf entries into the L2 table */
	prot = dart_prot_to_pte(data, iommu_prot);
	map_idx_start = dart_get_l2_index(data, iova);
	max_entries = DART_PTES_PER_TABLE(data) - map_idx_start;
	num_entries = min_t(int, pgcount, max_entries);
	ptep += map_idx_start;
	ret = dart_init_pte(data, iova, paddr, prot, num_entries, ptep);
	if (!ret && mapped)
		*mapped += num_entries * pgsize;

	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

/*
 * Unmap up to @pgcount pages starting at @iova, stopping at the end of
 * the containing L2 table or at the first empty PTE (WARN).  Queues a
 * TLB flush per page unless the gather is already queued.  Returns the
 * number of bytes unmapped.
 */
static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int i = 0, num_entries, max_entries, unmap_idx_start;
	dart_iopte pte, *ptep;

	if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))
		return 0;

	ptep = dart_get_l2(data, iova);

	/* Valid L2 IOPTE pointer? */
	if (WARN_ON(!ptep))
		return 0;

	unmap_idx_start = dart_get_l2_index(data, iova);
	ptep += unmap_idx_start;

	max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start;
	num_entries = min_t(int, pgcount, max_entries);

	while (i < num_entries) {
		pte = READ_ONCE(*ptep);
		if (WARN_ON(!pte))
			break;

		/* clear pte */
		*ptep = 0;

		if (!iommu_iotlb_gather_queued(gather))
			io_pgtable_tlb_add_page(&data->iop, gather,
						iova + i * pgsize, pgsize);

		ptep++;
		i++;
	}

	return i * pgsize;
}

/*
 * Software table walk: resolve @iova to its mapped physical address, or
 * 0 if no translation is installed.
 */
static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
	dart_iopte pte, *ptep;

	ptep = dart_get_l2(data, iova);

	/* Valid L2 IOPTE pointer? */
	if (!ptep)
		return 0;

	ptep += dart_get_l2_index(data, iova);

	pte = READ_ONCE(*ptep);
	/* Found translation */
	if (pte) {
		/* keep the in-page offset bits of the iova */
		iova &= (data->iop.cfg.pgsize_bitmap - 1);
		return iopte_to_paddr(pte, data) | iova;
	}

	/* Ran out of page tables to walk */
	return 0;
}

/*
 * Allocate and initialise the software state for a pgtable.  Derives
 * bits_per_level from the page size and tbl_bits from whatever part of
 * the IAS the two levels cannot cover; fails if that would need more
 * than DART_MAX_TABLES root tables.  Root tables themselves are
 * allocated by the caller.
 */
static struct dart_io_pgtable *
dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct dart_io_pgtable *data;
	int tbl_bits, bits_per_level, va_bits, pg_shift;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	bits_per_level = pg_shift - ilog2(sizeof(dart_iopte));

	va_bits = cfg->ias - pg_shift;

	tbl_bits = max_t(int, 0, va_bits - (bits_per_level * DART_LEVELS));
	if ((1 << tbl_bits) > DART_MAX_TABLES)
		return NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->tbl_bits = tbl_bits;
	data->bits_per_level = bits_per_level;

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= dart_map_pages,
		.unmap_pages	= dart_unmap_pages,
		.iova_to_phys	= dart_iova_to_phys,
	};

	return data;
}

/*
 * io_pgtable_init_fns::alloc hook.  Validates the config (coherent walk
 * only, 36/42-bit OAS, IAS <= OAS, exactly one of 4K/16K pages), then
 * allocates one root table per TTBR and publishes their physical
 * addresses in cfg->apple_dart_cfg.ttbr[].
 */
static struct io_pgtable *
apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct dart_io_pgtable *data;
	int i;

	if (!cfg->coherent_walk)
		return NULL;

	if (cfg->oas != 36 && cfg->oas != 42)
		return NULL;

	if (cfg->ias > cfg->oas)
		return NULL;

	if (!(cfg->pgsize_bitmap == SZ_4K || cfg->pgsize_bitmap == SZ_16K))
		return NULL;

	data = dart_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;

	for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
		data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
		if (!data->pgd[i])
			goto out_free_data;
		cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
	}

	return &data->iop;

out_free_data:
	/* unwind only the root tables allocated so far */
	while (--i >= 0) {
		iommu_free_pages(data->pgd[i],
				 get_order(DART_GRANULE(data)));
	}
	kfree(data);
	return NULL;
}

/*
 * io_pgtable_init_fns::free hook.  Walks every root table, frees each
 * installed L2 table, then the root tables and the software state.
 * Callers must ensure the tables are no longer live in hardware.
 */
static void apple_dart_free_pgtable(struct io_pgtable *iop)
{
	struct dart_io_pgtable *data = io_pgtable_to_data(iop);
	int order = get_order(DART_GRANULE(data));
	dart_iopte *ptep, *end;
	int i;

	for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
		ptep = data->pgd[i];
		end = (void *)ptep + DART_GRANULE(data);

		while (ptep != end) {
			dart_iopte pte = *ptep++;

			if (pte)
				iommu_free_pages(iopte_deref(pte, data), order);
		}
		iommu_free_pages(data->pgd[i], order);
	}

	kfree(data);
}

struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
	.alloc	= apple_dart_alloc_pgtable,
	.free	= apple_dart_free_pgtable,
};