// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

#include "iommu-pages.h"

struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
	unsigned int swgroup;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)	((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
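
/*
 * Address translation uses a two-level page table: bits [31:22] of the
 * IOVA index the SMMU_NUM_PDE entries of the page directory (each PDE
 * covers a 4 MiB section), and bits [21:12] index the SMMU_NUM_PTE
 * entries of the corresponding page table (4 KiB pages). The helpers
 * below extract those two indices from an IOVA.
 */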
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
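
/*
 * The SMMU caches page table entries in a page table cache (PTC) and
 * translations in a TLB, so both need to be flushed whenever the page
 * tables are modified. The dummy read of SMMU_PTB_ASID in smmu_flush()
 * is intended to make sure the preceding register writes have reached
 * the hardware before the caller proceeds.
 */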
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids)
		return -ENOSPC;

	set_bit(id, smmu->asids);
	*idp = id;

	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	clear_bit(id, smmu->asids);
}

static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
{
	struct tegra_smmu_as *as;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__iommu_free_pages(as->pd, 0);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__iommu_free_pages(as->pd, 0);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->regs.smmu.reg);
		value |= BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->regs.smmu.reg);
		value &= ~BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
	}
}
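
/*
 * tegra_smmu_as_prepare() and tegra_smmu_as_unprepare() reference-count
 * the hardware state of an address space: the first user maps the page
 * directory for DMA, allocates an ASID and programs SMMU_PTB_ASID and
 * SMMU_PTB_DATA, while the last user releases the ASID and the DMA
 * mapping again. Both run under smmu->lock.
 */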
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err = 0;

	mutex_lock(&smmu->lock);

	if (as->use_count > 0) {
		as->use_count++;
		goto unlock;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	mutex_unlock(&smmu->lock);

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
	mutex_unlock(&smmu->lock);

	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	mutex_lock(&smmu->lock);

	if (--as->use_count > 0) {
		mutex_unlock(&smmu->lock);
		return;
	}

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;

	mutex_unlock(&smmu->lock);
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned int index;
	int err;

	if (!fwspec)
		return -ENOENT;

	for (index = 0; index < fwspec->num_ids; index++) {
		err = tegra_smmu_as_prepare(smmu, as);
		if (err)
			goto disable;

		tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
	}

	if (index == 0)
		return -ENODEV;

	return 0;

disable:
	while (index--) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}

	return err;
}

static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain,
				      struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu_as *as;
	struct tegra_smmu *smmu;
	unsigned int index;

	if (!fwspec)
		return -ENODEV;

	if (domain == identity_domain || !domain)
		return 0;

	as = to_smmu_as(domain);
	smmu = as->smmu;
	for (index = 0; index < fwspec->num_ids; index++) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}
	return 0;
}

static struct iommu_domain_ops tegra_smmu_identity_ops = {
	.attach_dev = tegra_smmu_identity_attach,
};

static struct iommu_domain tegra_smmu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &tegra_smmu_identity_ops,
};
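
/*
 * Page directory and page table updates follow the same pattern: write
 * the entry in CPU memory, sync that memory for the device, flush the
 * affected PTC line and TLB entries, and finish with a read-back via
 * smmu_flush().
 */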
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct page *page)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__iommu_free_pages(page, 0);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__iommu_free_pages(page, 0);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__iommu_free_pages(page, 0);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
				    unsigned long iova, gfp_t gfp,
				    unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/* at first check whether allocation needs to be done at all */
	if (page)
		return page;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate the page in a sleeping context if the GFP flags permit.
	 * Hence the spinlock needs to be unlocked and re-locked around the
	 * allocation.
	 */
	if (gfpflags_allow_blocking(gfp))
		spin_unlock_irqrestore(&as->lock, *flags);

	page = __iommu_alloc_pages(gfp | __GFP_DMA, 0);

	if (gfpflags_allow_blocking(gfp))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In the case of a blocking allocation, a concurrent mapping may
	 * have won the PDE allocation. In that case the page allocated here
	 * isn't needed if our allocation succeeded, and an allocation
	 * failure isn't fatal.
	 */
	if (as->pts[pde]) {
		if (page)
			__iommu_free_pages(page, 0);

		page = as->pts[pde];
	}

	return page;
}
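
/*
 * __tegra_smmu_map() and __tegra_smmu_unmap() expect as->lock to be held
 * by the tegra_smmu_map()/tegra_smmu_unmap() wrappers below. The map path
 * allocates the section's page table on demand (as_get_pde_page() and
 * as_get_pte()) and tracks per-PDE use counts so that empty page tables
 * can be freed again when the last PTE in them is unmapped.
 */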
static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct page *page;
	u32 pte_attrs;
	u32 *pte;

	page = as_get_pde_page(as, iova, gfp, flags);
	if (!page)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, page);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t count,
			  int prot, gfp_t gfp, size_t *mapped)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	if (!ret)
		*mapped = size;

	return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, size_t count,
			       struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}
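
/*
 * The SMMU is part of the Tegra memory controller (MC): tegra_smmu_find()
 * resolves an "iommus" phandle to the MC platform device and returns the
 * tegra_smmu instance stored in its driver data.
 */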
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc) {
		put_device(&pdev->dev);
		return NULL;
	}

	return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				const struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, dev_fwnode(smmu->dev));
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}

static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);

			if (err < 0) {
				of_node_put(args.np);
				return ERR_PTR(err);
			}
		}

		of_node_put(args.np);
		index++;
	}

	smmu = dev_iommu_priv_get(dev);
	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
	struct tegra_smmu_group *group = iommu_data;
	struct tegra_smmu *smmu = group->smmu;

	mutex_lock(&smmu->lock);
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
}
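
/*
 * IOMMU groups are cached per swgroup (or per SoC-defined group of
 * swgroups) in smmu->groups, so that clients sharing a swgroup end up in
 * the same iommu_group.
 */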
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	const struct tegra_smmu_group_soc *soc;
	unsigned int swgroup = fwspec->ids[0];
	struct tegra_smmu_group *group;
	struct iommu_group *grp;

	/* Find the group_soc associated with the swgroup */
	soc = tegra_smmu_find_group(smmu, swgroup);

	mutex_lock(&smmu->lock);

	/* Find an existing iommu_group associated with the swgroup or group_soc */
	list_for_each_entry(group, &smmu->groups, list)
		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
			return grp;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->swgroup = swgroup;
	group->smmu = smmu;
	group->soc = soc;

	if (dev_is_pci(dev))
		group->group = pci_device_group(dev);
	else
		group->group = generic_device_group(dev);

	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	if (soc)
		iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       const struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
	u32 id = args->args[0];

	/*
	 * Note: we are releasing the reference to &iommu_pdev->dev here,
	 * which is mc->dev. Although some functions in tegra_smmu_ops may
	 * keep using its private data beyond this point, this is still safe
	 * because the SMMU parent device is the same as the MC, so the
	 * reference count isn't strictly necessary.
	 */
	put_device(&iommu_pdev->dev);

	dev_iommu_priv_set(dev, mc->smmu);

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static int tegra_smmu_def_domain_type(struct device *dev)
{
	/*
	 * FIXME: For now we want to run all translation in IDENTITY mode,
	 * due to some device quirks. Better would be to just quirk the
	 * troubled devices.
	 */
	return IOMMU_DOMAIN_IDENTITY;
}

static const struct iommu_ops tegra_smmu_ops = {
	.identity_domain = &tegra_smmu_identity_domain,
	.def_domain_type = &tegra_smmu_def_domain_type,
	.domain_alloc_paging = tegra_smmu_domain_alloc_paging,
	.probe_device = tegra_smmu_probe_device,
	.device_group = tegra_smmu_device_group,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = tegra_smmu_attach_dev,
		.map_pages = tegra_smmu_map,
		.unmap_pages = tegra_smmu_unmap,
		.iova_to_phys = tegra_smmu_iova_to_phys,
		.free = tegra_smmu_domain_free,
	}
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->regs.smmu.reg);

		if (value & BIT(client->regs.smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}
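
/*
 * tegra_smmu_probe() is called by the memory controller driver and shares
 * the MC register space: it configures the PTC and TLB, flushes both,
 * enables translation and then registers with the IOMMU core.
 */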
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However iommu_device_register() will attempt to add all
	 * devices to the IOMMU before we get that far. In order not to rely
	 * on global variables to track the IOMMU instance, we set it here
	 * so that it can be looked up from the .probe_device() callback via
	 * the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask =
		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}
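
/* Undo the registration steps performed by tegra_smmu_probe(). */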
void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}