/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"	mrc	" #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static int msm_iommu_tex_class[4];

DEFINE_SPINLOCK(msm_iommu_lock);

struct msm_priv {
	unsigned long *pgtable;
	struct list_head list_attached;
};

static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_enable(drvdata->clk);
		if (ret)
			clk_disable(drvdata->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	clk_disable(drvdata->clk);
	clk_disable(drvdata->pclk);
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
#ifndef CONFIG_IOMMU_PGTABLES_L2
	unsigned long *fl_table = priv->pgtable;
	int i;

	if (!list_empty(&priv->list_attached)) {
		/* Tables are walked from memory in this configuration, so
		 * flush the CPU dcache lines covering them to make the
		 * updates visible to the hardware table walker.
		 */
		dmac_flush_range(fl_table, fl_table + SZ_16K);

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
				void *sl_table = __va(fl_table[i] &
							FL_BASE_MASK);
				dmac_flush_range(sl_table, sl_table + SZ_4K);
			}
	}
#endif

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;

	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	/* The first-level table is 16KB-aligned; TTBR0 holds PA bits [31:14] */
	SET_TTBR0_PA(base, ctx, (pgtable >> 14));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static int msm_iommu_domain_init(struct iommu_domain *domain)
{
	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));

	if (!priv->pgtable)
		goto fail_nomem;

	memset(priv->pgtable, 0, SZ_16K);
	domain->priv = priv;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = (1ULL << 32) - 1;
	domain->geometry.force_aperture = true;

	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
							       FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num,
			  __pa(priv->pgtable));

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ret = __flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __flush_iotlb(domain);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
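
/*
 * Added note on the layout assumed by map/unmap below (ARMv7
 * short-descriptor format): the upper 12 bits of the VA index the
 * 4096-entry (16KB) first-level table; for 4K and 64K pages the next
 * 8 bits index a 256-entry second-level table, which this driver carves
 * out of a full 4KB page. 16M supersections and 64K large pages must be
 * replicated across 16 consecutive entries of their respective tables,
 * which is why the loops below write 16 identical PTEs.
 */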
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	int ret = 0, tex, sh;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
	tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
		ret = -EINVAL;
		goto fail;
	}

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = sh ? FL_SHARED : 0;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
	} else {
		pgprot = sh ? SL_SHARED : 0;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		int i;

		for (i = 0; i < 16; i++)
			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
				  FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
				  FL_SHARED | FL_NG | pgprot;
	}

	if (len == SZ_1M)
		*fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
					    FL_TYPE_SECT | FL_SHARED | pgprot;

	/* Need a 2nd level table */
	if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
		unsigned long *sl;
		sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
							get_order(SZ_4K));

		if (!sl) {
			pr_debug("Could not allocate second level table\n");
			ret = -ENOMEM;
			goto fail;
		}

		memset(sl, 0, SZ_4K);
		*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K)
		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
					  SL_SHARED | SL_TYPE_SMALL | pgprot;

	if (len == SZ_64K) {
		int i;

		for (i = 0; i < 16; i++)
			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
			    SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
	}

	ret = __flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	int i, ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv)
		goto fail;

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zu\n", len);
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M)
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

	if (len == SZ_1M)
		*fl_pte = 0;

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;
	}

	if (len == SZ_4K)
		*sl_pte = 0;

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		/* Free the second-level table once no entries remain in it */
		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;
		}
	}

	ret = __flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}
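
/*
 * Added note: translation queries below are answered by the IOMMU hardware
 * rather than by a software table walk. The VA is written to the V2P probe
 * register of the first attached context and the physical address is read
 * back from PAR.
 */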
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	unsigned long flags;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	__disable_clocks(iommu_drvdata);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
"MULTI " : ""); 626 627 pr_err("FSYNR0 = %08x FSYNR1 = %08x\n", 628 GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx)); 629 pr_err("TTBR0 = %08x TTBR1 = %08x\n", 630 GET_TTBR0(base, ctx), GET_TTBR1(base, ctx)); 631 pr_err("SCTLR = %08x ACTLR = %08x\n", 632 GET_SCTLR(base, ctx), GET_ACTLR(base, ctx)); 633 pr_err("PRRR = %08x NMRR = %08x\n", 634 GET_PRRR(base, ctx), GET_NMRR(base, ctx)); 635 } 636 637 irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) 638 { 639 struct msm_iommu_drvdata *drvdata = dev_id; 640 void __iomem *base; 641 unsigned int fsr; 642 int i, ret; 643 644 spin_lock(&msm_iommu_lock); 645 646 if (!drvdata) { 647 pr_err("Invalid device ID in context interrupt handler\n"); 648 goto fail; 649 } 650 651 base = drvdata->base; 652 653 pr_err("Unexpected IOMMU page fault!\n"); 654 pr_err("base = %08x\n", (unsigned int) base); 655 656 ret = __enable_clocks(drvdata); 657 if (ret) 658 goto fail; 659 660 for (i = 0; i < drvdata->ncb; i++) { 661 fsr = GET_FSR(base, i); 662 if (fsr) { 663 pr_err("Fault occurred in context %d.\n", i); 664 pr_err("Interesting registers:\n"); 665 print_ctx_regs(base, i); 666 SET_FSR(base, i, 0x4000000F); 667 } 668 } 669 __disable_clocks(drvdata); 670 fail: 671 spin_unlock(&msm_iommu_lock); 672 return 0; 673 } 674 675 static const struct iommu_ops msm_iommu_ops = { 676 .capable = msm_iommu_capable, 677 .domain_init = msm_iommu_domain_init, 678 .domain_destroy = msm_iommu_domain_destroy, 679 .attach_dev = msm_iommu_attach_dev, 680 .detach_dev = msm_iommu_detach_dev, 681 .map = msm_iommu_map, 682 .unmap = msm_iommu_unmap, 683 .map_sg = default_iommu_map_sg, 684 .iova_to_phys = msm_iommu_iova_to_phys, 685 .pgsize_bitmap = MSM_IOMMU_PGSIZES, 686 }; 687 688 static int __init get_tex_class(int icp, int ocp, int mt, int nos) 689 { 690 int i = 0; 691 unsigned int prrr = 0; 692 unsigned int nmrr = 0; 693 int c_icp, c_ocp, c_mt, c_nos; 694 695 RCP15_PRRR(prrr); 696 RCP15_NMRR(nmrr); 697 698 for (i = 0; i < NUM_TEX_CLASS; i++) { 699 c_nos = PRRR_NOS(prrr, i); 700 c_mt = PRRR_MT(prrr, i); 701 c_icp = NMRR_ICP(nmrr, i); 702 c_ocp = NMRR_OCP(nmrr, i); 703 704 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) 705 return i; 706 } 707 708 return -ENODEV; 709 } 710 711 static void __init setup_iommu_tex_classes(void) 712 { 713 msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] = 714 get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1); 715 716 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] = 717 get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1); 718 719 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] = 720 get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1); 721 722 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] = 723 get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1); 724 } 725 726 static int __init msm_iommu_init(void) 727 { 728 setup_iommu_tex_classes(); 729 bus_set_iommu(&platform_bus_type, &msm_iommu_ops); 730 return 0; 731 } 732 733 subsys_initcall(msm_iommu_init); 734 735 MODULE_LICENSE("GPL v2"); 736 MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>"); 737