// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

#include "iommu-pages.h"

struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
	unsigned int swgroup;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_pd;
struct tegra_pt;

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct tegra_pt **pts;
	struct tegra_pd *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)	((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)

struct tegra_pd {
	u32 val[SMMU_NUM_PDE];
};

struct tegra_pt {
	u32 val[SMMU_NUM_PTE];
};

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}
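
/*
 * Illustrative example (not used by the driver): with the 22/12-bit split
 * implemented by the helpers above, a 32-bit IOVA such as 0x12345678
 * decomposes as
 *
 *	iova_pd_index(0x12345678) = 0x12345678 >> 22           = 0x048
 *	iova_pt_index(0x12345678) = (0x12345678 >> 12) & 0x3ff = 0x345
 *	page offset               = 0x12345678 & 0xfff         = 0x678
 *
 * i.e. PDE 0x48 in the 1024-entry page directory selects a page table,
 * PTE 0x345 in that table maps the 4 KiB page, and the low 12 bits are the
 * offset within the page.
 */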

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
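
/*
 * Register writes to the memory controller are posted; smmu_flush() below
 * reads back SMMU_PTB_ASID so that the preceding PTC/TLB flush writes are
 * known to have reached the hardware before the caller continues.
 */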

static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids)
		return -ENOSPC;

	set_bit(id, smmu->asids);
	*idp = id;

	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	clear_bit(id, smmu->asids);
}

static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
{
	struct tegra_smmu_as *as;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = iommu_alloc_pages_sz(GFP_KERNEL | __GFP_DMA, SMMU_SIZE_PD);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		iommu_free_pages(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		iommu_free_pages(as->pd);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->regs.smmu.reg);
		value |= BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->regs.smmu.reg);
		value &= ~BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
	}
}
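
/*
 * tegra_smmu_as_prepare()/tegra_smmu_as_unprepare() are reference counted
 * per address space: the first prepare maps the page directory for DMA,
 * allocates an ASID and programs it via SMMU_PTB_ASID/SMMU_PTB_DATA, while
 * later prepares only bump as->use_count. The last unprepare releases the
 * ASID and the DMA mapping of the page directory again.
 */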

static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err = 0;

	mutex_lock(&smmu->lock);

	if (as->use_count > 0) {
		as->use_count++;
		goto unlock;
	}

	as->pd_dma =
		dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	mutex_unlock(&smmu->lock);

	return 0;

err_unmap:
	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
	mutex_unlock(&smmu->lock);

	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	mutex_lock(&smmu->lock);

	if (--as->use_count > 0) {
		mutex_unlock(&smmu->lock);
		return;
	}

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;

	mutex_unlock(&smmu->lock);
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned int index;
	int err;

	if (!fwspec)
		return -ENOENT;

	for (index = 0; index < fwspec->num_ids; index++) {
		err = tegra_smmu_as_prepare(smmu, as);
		if (err)
			goto disable;

		tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
	}

	if (index == 0)
		return -ENODEV;

	return 0;

disable:
	while (index--) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}

	return err;
}

static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain,
				      struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu_as *as;
	struct tegra_smmu *smmu;
	unsigned int index;

	if (!fwspec)
		return -ENODEV;

	if (domain == identity_domain || !domain)
		return 0;

	as = to_smmu_as(domain);
	smmu = as->smmu;
	for (index = 0; index < fwspec->num_ids; index++) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}
	return 0;
}
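
/*
 * The identity domain detaches a device from its paging domain:
 * tegra_smmu_identity_attach() clears the per-swgroup ASID enable bit and
 * the per-client enable bits, so the client's DMA bypasses SMMU translation.
 */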

static struct iommu_domain_ops tegra_smmu_identity_ops = {
	.attach_dev = tegra_smmu_identity_attach,
};

static struct iommu_domain tegra_smmu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &tegra_smmu_identity_ops,
};

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct tegra_pd *pd = as->pd;
	unsigned long offset = pd_index * sizeof(*pd->val);

	/* Set the page directory entry first */
	pd->val[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd->val), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct tegra_pt *pt, unsigned long iova)
{
	return &pt->val[iova_pt_index(iova)];
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct tegra_pt *pt;

	pt = as->pts[pd_index];
	if (!pt)
		return NULL;

	*dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);

	return tegra_smmu_pte_offset(pt, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct tegra_pt *pt)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT,
				     DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			iommu_free_pages(pt);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT,
					 DMA_TO_DEVICE);
			iommu_free_pages(pt);
			return NULL;
		}

		as->pts[pde] = pt;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		*dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_pt *pt = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT,
				 DMA_TO_DEVICE);
		iommu_free_pages(pt);
		as->pts[pde] = NULL;
	}
}
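
/*
 * PTE updates follow the same pattern as PDE updates above: write the entry
 * in the CPU-side copy, sync that word to the device with
 * dma_sync_single_range_for_device(), invalidate the page table cache (PTC)
 * line holding it, invalidate the TLB entries for the affected IOVA range,
 * and finally read back a register so the flushes are known to have landed.
 */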

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static struct tegra_pt *as_get_pde_page(struct tegra_smmu_as *as,
					unsigned long iova, gfp_t gfp,
					unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_pt *pt = as->pts[pde];

	/* at first check whether allocation needs to be done at all */
	if (pt)
		return pt;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate the page in a sleeping context if the GFP flags permit.
	 * Hence the spinlock needs to be unlocked and re-locked around the
	 * allocation.
	 */
	if (gfpflags_allow_blocking(gfp))
		spin_unlock_irqrestore(&as->lock, *flags);

	pt = iommu_alloc_pages_sz(gfp | __GFP_DMA, SMMU_SIZE_PT);

	if (gfpflags_allow_blocking(gfp))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In the case of a blocking allocation, a concurrent mapping may win
	 * the race for the PDE. If so, the page we just allocated isn't
	 * needed (free it if the allocation succeeded), and a failed
	 * allocation isn't fatal since we can use the winner's page table.
	 */
	if (as->pts[pde]) {
		if (pt)
			iommu_free_pages(pt);

		pt = as->pts[pde];
	}

	return pt;
}

static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct tegra_pt *pt;
	u32 pte_attrs;
	u32 *pte;

	pt = as_get_pde_page(as, iova, gfp, flags);
	if (!pt)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, pt);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t count,
			  int prot, gfp_t gfp, size_t *mapped)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	if (!ret)
		*mapped = size;

	return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, size_t count,
			       struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc) {
		put_device(&pdev->dev);
		return NULL;
	}

	return mc->smmu;
}
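
/*
 * Illustrative device tree usage (names taken from the Tegra MC bindings,
 * shown here only as an example): a client device references the memory
 * controller as its IOMMU together with its SWGROUP ID, e.g.
 *
 *	dc@54200000 {
 *		...
 *		iommus = <&mc TEGRA_SWGROUP_DC>;
 *	};
 *
 * tegra_smmu_configure()/tegra_smmu_probe_device() below walk these "iommus"
 * phandles, and tegra_smmu_of_xlate() records the SWGROUP ID in the device's
 * IOMMU fwspec.
 */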

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				const struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, dev_fwnode(smmu->dev));
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		return err;
	}

	return 0;
}

static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);

			if (err < 0) {
				of_node_put(args.np);
				return ERR_PTR(err);
			}
		}

		of_node_put(args.np);
		index++;
	}

	smmu = dev_iommu_priv_get(dev);
	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
	struct tegra_smmu_group *group = iommu_data;
	struct tegra_smmu *smmu = group->smmu;

	mutex_lock(&smmu->lock);
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	const struct tegra_smmu_group_soc *soc;
	unsigned int swgroup = fwspec->ids[0];
	struct tegra_smmu_group *group;
	struct iommu_group *grp;

	/* Find group_soc associating with swgroup */
	soc = tegra_smmu_find_group(smmu, swgroup);

	mutex_lock(&smmu->lock);

	/* Find existing iommu_group associating with swgroup or group_soc */
	list_for_each_entry(group, &smmu->groups, list)
		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
			return grp;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->swgroup = swgroup;
	group->smmu = smmu;
	group->soc = soc;

	if (dev_is_pci(dev))
		group->group = pci_device_group(dev);
	else
		group->group = generic_device_group(dev);

	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	if (soc)
		iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}
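
/*
 * Called while the "iommus" specifier is parsed: stash the SMMU instance in
 * the device's IOMMU private data and append the SWGROUP ID from the
 * specifier to the fwspec, where tegra_smmu_device_group() and
 * tegra_smmu_attach_dev() pick it up later.
 */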

static int tegra_smmu_of_xlate(struct device *dev,
			       const struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
	u32 id = args->args[0];

	/*
	 * Note: we are here releasing the reference of &iommu_pdev->dev, which
	 * is mc->dev. Although some functions in tegra_smmu_ops may keep using
	 * its private data beyond this point, it's still safe to do so because
	 * the SMMU parent device is the same as the MC, so the reference count
	 * isn't strictly necessary.
	 */
	put_device(&iommu_pdev->dev);

	dev_iommu_priv_set(dev, mc->smmu);

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static int tegra_smmu_def_domain_type(struct device *dev)
{
	/*
	 * FIXME: For now we want to run all translation in IDENTITY mode, due
	 * to some device quirks. Better would be to just quirk the troubled
	 * devices.
	 */
	return IOMMU_DOMAIN_IDENTITY;
}

static const struct iommu_ops tegra_smmu_ops = {
	.identity_domain = &tegra_smmu_identity_domain,
	.def_domain_type = &tegra_smmu_def_domain_type,
	.domain_alloc_paging = tegra_smmu_domain_alloc_paging,
	.probe_device = tegra_smmu_probe_device,
	.device_group = tegra_smmu_device_group,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = tegra_smmu_attach_dev,
		.map_pages = tegra_smmu_map,
		.unmap_pages = tegra_smmu_unmap,
		.iova_to_phys = tegra_smmu_iova_to_phys,
		.free = tegra_smmu_domain_free,
	}
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->regs.smmu.reg);

		if (value & BIT(client->regs.smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However iommu_device_register() will attempt to add
	 * all devices to the IOMMU before we get that far. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .probe_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask =
		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}