1 /* 2 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License version 2 as 6 * published by the Free Software Foundation. 7 */ 8 9 #include <linux/err.h> 10 #include <linux/iommu.h> 11 #include <linux/kernel.h> 12 #include <linux/of.h> 13 #include <linux/of_device.h> 14 #include <linux/platform_device.h> 15 #include <linux/slab.h> 16 17 #include <soc/tegra/ahb.h> 18 #include <soc/tegra/mc.h> 19 20 struct tegra_smmu { 21 void __iomem *regs; 22 struct device *dev; 23 24 struct tegra_mc *mc; 25 const struct tegra_smmu_soc *soc; 26 27 unsigned long *asids; 28 struct mutex lock; 29 30 struct list_head list; 31 }; 32 33 struct tegra_smmu_as { 34 struct iommu_domain *domain; 35 struct tegra_smmu *smmu; 36 unsigned int use_count; 37 struct page *count; 38 struct page *pd; 39 unsigned id; 40 u32 attr; 41 }; 42 43 static inline void smmu_writel(struct tegra_smmu *smmu, u32 value, 44 unsigned long offset) 45 { 46 writel(value, smmu->regs + offset); 47 } 48 49 static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) 50 { 51 return readl(smmu->regs + offset); 52 } 53 54 #define SMMU_CONFIG 0x010 55 #define SMMU_CONFIG_ENABLE (1 << 0) 56 57 #define SMMU_TLB_CONFIG 0x14 58 #define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29) 59 #define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28) 60 #define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f) 61 62 #define SMMU_PTC_CONFIG 0x18 63 #define SMMU_PTC_CONFIG_ENABLE (1 << 29) 64 #define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24) 65 #define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f) 66 67 #define SMMU_PTB_ASID 0x01c 68 #define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f) 69 70 #define SMMU_PTB_DATA 0x020 71 #define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr)) 72 73 #define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr)) 74 75 #define SMMU_TLB_FLUSH 0x030 76 #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0) 77 #define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0) 78 #define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0) 79 #define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24) 80 #define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \ 81 SMMU_TLB_FLUSH_VA_MATCH_SECTION) 82 #define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \ 83 SMMU_TLB_FLUSH_VA_MATCH_GROUP) 84 #define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31) 85 86 #define SMMU_PTC_FLUSH 0x034 87 #define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0) 88 #define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0) 89 90 #define SMMU_PTC_FLUSH_HI 0x9b8 91 #define SMMU_PTC_FLUSH_HI_MASK 0x3 92 93 /* per-SWGROUP SMMU_*_ASID register */ 94 #define SMMU_ASID_ENABLE (1 << 31) 95 #define SMMU_ASID_MASK 0x7f 96 #define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK) 97 98 /* page table definitions */ 99 #define SMMU_NUM_PDE 1024 100 #define SMMU_NUM_PTE 1024 101 102 #define SMMU_SIZE_PD (SMMU_NUM_PDE * 4) 103 #define SMMU_SIZE_PT (SMMU_NUM_PTE * 4) 104 105 #define SMMU_PDE_SHIFT 22 106 #define SMMU_PTE_SHIFT 12 107 108 #define SMMU_PFN_MASK 0x000fffff 109 110 #define SMMU_PD_READABLE (1 << 31) 111 #define SMMU_PD_WRITABLE (1 << 30) 112 #define SMMU_PD_NONSECURE (1 << 29) 113 114 #define SMMU_PDE_READABLE (1 << 31) 115 #define SMMU_PDE_WRITABLE (1 << 30) 116 #define SMMU_PDE_NONSECURE (1 << 29) 117 #define SMMU_PDE_NEXT (1 << 28) 118 119 #define SMMU_PTE_READABLE (1 << 31) 120 #define SMMU_PTE_WRITABLE (1 << 30) 121 #define SMMU_PTE_NONSECURE (1 << 29) 122 123 #define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \ 124 SMMU_PDE_NONSECURE) 125 #define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \ 126 SMMU_PTE_NONSECURE) 127 128 static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page, 129 unsigned long offset) 130 { 131 phys_addr_t phys = page ? page_to_phys(page) : 0; 132 u32 value; 133 134 if (page) { 135 offset &= ~(smmu->mc->soc->atom_size - 1); 136 137 if (smmu->mc->soc->num_address_bits > 32) { 138 #ifdef CONFIG_PHYS_ADDR_T_64BIT 139 value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK; 140 #else 141 value = 0; 142 #endif 143 smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI); 144 } 145 146 value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR; 147 } else { 148 value = SMMU_PTC_FLUSH_TYPE_ALL; 149 } 150 151 smmu_writel(smmu, value, SMMU_PTC_FLUSH); 152 } 153 154 static inline void smmu_flush_tlb(struct tegra_smmu *smmu) 155 { 156 smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH); 157 } 158 159 static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu, 160 unsigned long asid) 161 { 162 u32 value; 163 164 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | 165 SMMU_TLB_FLUSH_VA_MATCH_ALL; 166 smmu_writel(smmu, value, SMMU_TLB_FLUSH); 167 } 168 169 static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu, 170 unsigned long asid, 171 unsigned long iova) 172 { 173 u32 value; 174 175 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | 176 SMMU_TLB_FLUSH_VA_SECTION(iova); 177 smmu_writel(smmu, value, SMMU_TLB_FLUSH); 178 } 179 180 static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu, 181 unsigned long asid, 182 unsigned long iova) 183 { 184 u32 value; 185 186 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | 187 SMMU_TLB_FLUSH_VA_GROUP(iova); 188 smmu_writel(smmu, value, SMMU_TLB_FLUSH); 189 } 190 191 static inline void smmu_flush(struct tegra_smmu *smmu) 192 { 193 smmu_readl(smmu, SMMU_CONFIG); 194 } 195 196 static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp) 197 { 198 unsigned long id; 199 200 mutex_lock(&smmu->lock); 201 202 id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids); 203 if (id >= smmu->soc->num_asids) { 204 mutex_unlock(&smmu->lock); 205 return -ENOSPC; 206 } 207 208 set_bit(id, smmu->asids); 209 *idp = id; 210 211 mutex_unlock(&smmu->lock); 212 return 0; 213 } 214 215 static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id) 216 { 217 mutex_lock(&smmu->lock); 218 clear_bit(id, smmu->asids); 219 mutex_unlock(&smmu->lock); 220 } 221 222 static bool tegra_smmu_capable(enum iommu_cap cap) 223 { 224 return false; 225 } 226 227 static int tegra_smmu_domain_init(struct iommu_domain *domain) 228 { 229 struct tegra_smmu_as *as; 230 unsigned int i; 231 uint32_t *pd; 232 233 as = kzalloc(sizeof(*as), GFP_KERNEL); 234 if (!as) 235 return -ENOMEM; 236 237 as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; 238 as->domain = domain; 239 240 as->pd = alloc_page(GFP_KERNEL | __GFP_DMA); 241 if (!as->pd) { 242 kfree(as); 243 return -ENOMEM; 244 } 245 246 as->count = alloc_page(GFP_KERNEL); 247 if (!as->count) { 248 __free_page(as->pd); 249 kfree(as); 250 return -ENOMEM; 251 } 252 253 /* clear PDEs */ 254 pd = page_address(as->pd); 255 SetPageReserved(as->pd); 256 257 for (i = 0; i < SMMU_NUM_PDE; i++) 258 pd[i] = 0; 259 260 /* clear PDE usage counters */ 261 pd = page_address(as->count); 262 SetPageReserved(as->count); 263 264 for (i = 0; i < SMMU_NUM_PDE; i++) 265 pd[i] = 0; 266 267 domain->priv = as; 268 269 return 0; 270 } 271 272 static void tegra_smmu_domain_destroy(struct iommu_domain *domain) 273 { 274 struct tegra_smmu_as *as = domain->priv; 275 276 /* TODO: free page directory and page tables */ 277 ClearPageReserved(as->pd); 278 279 kfree(as); 280 } 281 282 static const struct tegra_smmu_swgroup * 283 tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup) 284 { 285 const struct tegra_smmu_swgroup *group = NULL; 286 unsigned int i; 287 288 for (i = 0; i < smmu->soc->num_swgroups; i++) { 289 if (smmu->soc->swgroups[i].swgroup == swgroup) { 290 group = &smmu->soc->swgroups[i]; 291 break; 292 } 293 } 294 295 return group; 296 } 297 298 static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup, 299 unsigned int asid) 300 { 301 const struct tegra_smmu_swgroup *group; 302 unsigned int i; 303 u32 value; 304 305 for (i = 0; i < smmu->soc->num_clients; i++) { 306 const struct tegra_mc_client *client = &smmu->soc->clients[i]; 307 308 if (client->swgroup != swgroup) 309 continue; 310 311 value = smmu_readl(smmu, client->smmu.reg); 312 value |= BIT(client->smmu.bit); 313 smmu_writel(smmu, value, client->smmu.reg); 314 } 315 316 group = tegra_smmu_find_swgroup(smmu, swgroup); 317 if (group) { 318 value = smmu_readl(smmu, group->reg); 319 value &= ~SMMU_ASID_MASK; 320 value |= SMMU_ASID_VALUE(asid); 321 value |= SMMU_ASID_ENABLE; 322 smmu_writel(smmu, value, group->reg); 323 } 324 } 325 326 static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup, 327 unsigned int asid) 328 { 329 const struct tegra_smmu_swgroup *group; 330 unsigned int i; 331 u32 value; 332 333 group = tegra_smmu_find_swgroup(smmu, swgroup); 334 if (group) { 335 value = smmu_readl(smmu, group->reg); 336 value &= ~SMMU_ASID_MASK; 337 value |= SMMU_ASID_VALUE(asid); 338 value &= ~SMMU_ASID_ENABLE; 339 smmu_writel(smmu, value, group->reg); 340 } 341 342 for (i = 0; i < smmu->soc->num_clients; i++) { 343 const struct tegra_mc_client *client = &smmu->soc->clients[i]; 344 345 if (client->swgroup != swgroup) 346 continue; 347 348 value = smmu_readl(smmu, client->smmu.reg); 349 value &= ~BIT(client->smmu.bit); 350 smmu_writel(smmu, value, client->smmu.reg); 351 } 352 } 353 354 static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, 355 struct tegra_smmu_as *as) 356 { 357 u32 value; 358 int err; 359 360 if (as->use_count > 0) { 361 as->use_count++; 362 return 0; 363 } 364 365 err = tegra_smmu_alloc_asid(smmu, &as->id); 366 if (err < 0) 367 return err; 368 369 smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD); 370 smmu_flush_ptc(smmu, as->pd, 0); 371 smmu_flush_tlb_asid(smmu, as->id); 372 373 smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID); 374 value = SMMU_PTB_DATA_VALUE(as->pd, as->attr); 375 smmu_writel(smmu, value, SMMU_PTB_DATA); 376 smmu_flush(smmu); 377 378 as->smmu = smmu; 379 as->use_count++; 380 381 return 0; 382 } 383 384 static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, 385 struct tegra_smmu_as *as) 386 { 387 if (--as->use_count > 0) 388 return; 389 390 tegra_smmu_free_asid(smmu, as->id); 391 as->smmu = NULL; 392 } 393 394 static int tegra_smmu_attach_dev(struct iommu_domain *domain, 395 struct device *dev) 396 { 397 struct tegra_smmu *smmu = dev->archdata.iommu; 398 struct tegra_smmu_as *as = domain->priv; 399 struct device_node *np = dev->of_node; 400 struct of_phandle_args args; 401 unsigned int index = 0; 402 int err = 0; 403 404 while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, 405 &args)) { 406 unsigned int swgroup = args.args[0]; 407 408 if (args.np != smmu->dev->of_node) { 409 of_node_put(args.np); 410 continue; 411 } 412 413 of_node_put(args.np); 414 415 err = tegra_smmu_as_prepare(smmu, as); 416 if (err < 0) 417 return err; 418 419 tegra_smmu_enable(smmu, swgroup, as->id); 420 index++; 421 } 422 423 if (index == 0) 424 return -ENODEV; 425 426 return 0; 427 } 428 429 static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) 430 { 431 struct tegra_smmu_as *as = domain->priv; 432 struct device_node *np = dev->of_node; 433 struct tegra_smmu *smmu = as->smmu; 434 struct of_phandle_args args; 435 unsigned int index = 0; 436 437 while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, 438 &args)) { 439 unsigned int swgroup = args.args[0]; 440 441 if (args.np != smmu->dev->of_node) { 442 of_node_put(args.np); 443 continue; 444 } 445 446 of_node_put(args.np); 447 448 tegra_smmu_disable(smmu, swgroup, as->id); 449 tegra_smmu_as_unprepare(smmu, as); 450 index++; 451 } 452 } 453 454 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, 455 struct page **pagep) 456 { 457 u32 *pd = page_address(as->pd), *pt, *count; 458 u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff; 459 u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff; 460 struct tegra_smmu *smmu = as->smmu; 461 struct page *page; 462 unsigned int i; 463 464 if (pd[pde] == 0) { 465 page = alloc_page(GFP_KERNEL | __GFP_DMA); 466 if (!page) 467 return NULL; 468 469 pt = page_address(page); 470 SetPageReserved(page); 471 472 for (i = 0; i < SMMU_NUM_PTE; i++) 473 pt[i] = 0; 474 475 smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT); 476 477 pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT); 478 479 smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4); 480 smmu_flush_ptc(smmu, as->pd, pde << 2); 481 smmu_flush_tlb_section(smmu, as->id, iova); 482 smmu_flush(smmu); 483 } else { 484 page = pfn_to_page(pd[pde] & SMMU_PFN_MASK); 485 pt = page_address(page); 486 } 487 488 *pagep = page; 489 490 /* Keep track of entries in this page table. */ 491 count = page_address(as->count); 492 if (pt[pte] == 0) 493 count[pde]++; 494 495 return &pt[pte]; 496 } 497 498 static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova) 499 { 500 u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff; 501 u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff; 502 u32 *count = page_address(as->count); 503 u32 *pd = page_address(as->pd), *pt; 504 struct page *page; 505 506 page = pfn_to_page(pd[pde] & SMMU_PFN_MASK); 507 pt = page_address(page); 508 509 /* 510 * When no entries in this page table are used anymore, return the 511 * memory page to the system. 512 */ 513 if (pt[pte] != 0) { 514 if (--count[pde] == 0) { 515 ClearPageReserved(page); 516 __free_page(page); 517 pd[pde] = 0; 518 } 519 520 pt[pte] = 0; 521 } 522 } 523 524 static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, 525 phys_addr_t paddr, size_t size, int prot) 526 { 527 struct tegra_smmu_as *as = domain->priv; 528 struct tegra_smmu *smmu = as->smmu; 529 unsigned long offset; 530 struct page *page; 531 u32 *pte; 532 533 pte = as_get_pte(as, iova, &page); 534 if (!pte) 535 return -ENOMEM; 536 537 *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR; 538 offset = offset_in_page(pte); 539 540 smmu->soc->ops->flush_dcache(page, offset, 4); 541 smmu_flush_ptc(smmu, page, offset); 542 smmu_flush_tlb_group(smmu, as->id, iova); 543 smmu_flush(smmu); 544 545 return 0; 546 } 547 548 static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, 549 size_t size) 550 { 551 struct tegra_smmu_as *as = domain->priv; 552 struct tegra_smmu *smmu = as->smmu; 553 unsigned long offset; 554 struct page *page; 555 u32 *pte; 556 557 pte = as_get_pte(as, iova, &page); 558 if (!pte) 559 return 0; 560 561 offset = offset_in_page(pte); 562 as_put_pte(as, iova); 563 564 smmu->soc->ops->flush_dcache(page, offset, 4); 565 smmu_flush_ptc(smmu, page, offset); 566 smmu_flush_tlb_group(smmu, as->id, iova); 567 smmu_flush(smmu); 568 569 return size; 570 } 571 572 static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, 573 dma_addr_t iova) 574 { 575 struct tegra_smmu_as *as = domain->priv; 576 struct page *page; 577 unsigned long pfn; 578 u32 *pte; 579 580 pte = as_get_pte(as, iova, &page); 581 pfn = *pte & SMMU_PFN_MASK; 582 583 return PFN_PHYS(pfn); 584 } 585 586 static struct tegra_smmu *tegra_smmu_find(struct device_node *np) 587 { 588 struct platform_device *pdev; 589 struct tegra_mc *mc; 590 591 pdev = of_find_device_by_node(np); 592 if (!pdev) 593 return NULL; 594 595 mc = platform_get_drvdata(pdev); 596 if (!mc) 597 return NULL; 598 599 return mc->smmu; 600 } 601 602 static int tegra_smmu_add_device(struct device *dev) 603 { 604 struct device_node *np = dev->of_node; 605 struct of_phandle_args args; 606 unsigned int index = 0; 607 608 while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, 609 &args) == 0) { 610 struct tegra_smmu *smmu; 611 612 smmu = tegra_smmu_find(args.np); 613 if (smmu) { 614 /* 615 * Only a single IOMMU master interface is currently 616 * supported by the Linux kernel, so abort after the 617 * first match. 618 */ 619 dev->archdata.iommu = smmu; 620 break; 621 } 622 623 index++; 624 } 625 626 return 0; 627 } 628 629 static void tegra_smmu_remove_device(struct device *dev) 630 { 631 dev->archdata.iommu = NULL; 632 } 633 634 static const struct iommu_ops tegra_smmu_ops = { 635 .capable = tegra_smmu_capable, 636 .domain_init = tegra_smmu_domain_init, 637 .domain_destroy = tegra_smmu_domain_destroy, 638 .attach_dev = tegra_smmu_attach_dev, 639 .detach_dev = tegra_smmu_detach_dev, 640 .add_device = tegra_smmu_add_device, 641 .remove_device = tegra_smmu_remove_device, 642 .map = tegra_smmu_map, 643 .unmap = tegra_smmu_unmap, 644 .map_sg = default_iommu_map_sg, 645 .iova_to_phys = tegra_smmu_iova_to_phys, 646 647 .pgsize_bitmap = SZ_4K, 648 }; 649 650 static void tegra_smmu_ahb_enable(void) 651 { 652 static const struct of_device_id ahb_match[] = { 653 { .compatible = "nvidia,tegra30-ahb", }, 654 { } 655 }; 656 struct device_node *ahb; 657 658 ahb = of_find_matching_node(NULL, ahb_match); 659 if (ahb) { 660 tegra_ahb_enable_smmu(ahb); 661 of_node_put(ahb); 662 } 663 } 664 665 struct tegra_smmu *tegra_smmu_probe(struct device *dev, 666 const struct tegra_smmu_soc *soc, 667 struct tegra_mc *mc) 668 { 669 struct tegra_smmu *smmu; 670 size_t size; 671 u32 value; 672 int err; 673 674 /* This can happen on Tegra20 which doesn't have an SMMU */ 675 if (!soc) 676 return NULL; 677 678 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); 679 if (!smmu) 680 return ERR_PTR(-ENOMEM); 681 682 /* 683 * This is a bit of a hack. Ideally we'd want to simply return this 684 * value. However the IOMMU registration process will attempt to add 685 * all devices to the IOMMU when bus_set_iommu() is called. In order 686 * not to rely on global variables to track the IOMMU instance, we 687 * set it here so that it can be looked up from the .add_device() 688 * callback via the IOMMU device's .drvdata field. 689 */ 690 mc->smmu = smmu; 691 692 size = BITS_TO_LONGS(soc->num_asids) * sizeof(long); 693 694 smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL); 695 if (!smmu->asids) 696 return ERR_PTR(-ENOMEM); 697 698 mutex_init(&smmu->lock); 699 700 smmu->regs = mc->regs; 701 smmu->soc = soc; 702 smmu->dev = dev; 703 smmu->mc = mc; 704 705 value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f); 706 707 if (soc->supports_request_limit) 708 value |= SMMU_PTC_CONFIG_REQ_LIMIT(8); 709 710 smmu_writel(smmu, value, SMMU_PTC_CONFIG); 711 712 value = SMMU_TLB_CONFIG_HIT_UNDER_MISS | 713 SMMU_TLB_CONFIG_ACTIVE_LINES(0x20); 714 715 if (soc->supports_round_robin_arbitration) 716 value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION; 717 718 smmu_writel(smmu, value, SMMU_TLB_CONFIG); 719 720 smmu_flush_ptc(smmu, NULL, 0); 721 smmu_flush_tlb(smmu); 722 smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG); 723 smmu_flush(smmu); 724 725 tegra_smmu_ahb_enable(); 726 727 err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops); 728 if (err < 0) 729 return ERR_PTR(err); 730 731 return smmu; 732 } 733