/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-8xxx.h>
#include <mach/iommu.h>

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)

static int msm_iommu_tex_class[4];

DEFINE_SPINLOCK(msm_iommu_lock);

struct msm_priv {
	unsigned long *pgtable;
	struct list_head list_attached;
};

static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_enable(drvdata->clk);
		if (ret)
			clk_disable(drvdata->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable(drvdata->clk);
	clk_disable(drvdata->pclk);
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
#ifndef CONFIG_IOMMU_PGTABLES_L2
	unsigned long *fl_table = priv->pgtable;
	int i;

	if (!list_empty(&priv->list_attached)) {
		dmac_flush_range(fl_table, fl_table + SZ_16K);

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
				void *sl_table = __va(fl_table[i] &
								FL_BASE_MASK);
				dmac_flush_range(sl_table, sl_table + SZ_4K);
			}
	}
#endif

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
}
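/*
 * Program a context bank to translate through the given page table:
 * enable hardware table walking on TLB miss and for V2P probes, point
 * TTBR0 at the (16 KB aligned) first-level table, enable context fault
 * interrupts and TEX remapping, and finally turn the MMU on for this
 * context.
 */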
static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> 14));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static int msm_iommu_domain_init(struct iommu_domain *domain)
{
	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));

	if (!priv->pgtable)
		goto fail_nomem;

	memset(priv->pgtable, 0, SZ_16K);
	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
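/*
 * Attach a context bank to this domain: validate the device, check that
 * the context is not already attached anywhere, program the context
 * registers to point at the domain's page table, and add the context to
 * the list of attached contexts so future TLB flushes reach it.
 */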
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num,
			  __pa(priv->pgtable));

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ret = __flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __flush_iotlb(domain);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
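/*
 * Map a physically contiguous region into the domain using the ARM
 * short-descriptor format: a 16 KB first-level table of 4096 entries,
 * each covering 1 MB, with second-level tables (allocated one page
 * each) for 4 KB small pages and 64 KB large pages. 16 MB supersections
 * and 64 KB large pages are encoded as 16 identical consecutive entries
 * in their respective tables.
 */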
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, int order, int prot)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	size_t len = 0x1000UL << order;
	int ret = 0, tex, sh;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
	tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
		ret = -EINVAL;
		goto fail;
	}

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = sh ? FL_SHARED : 0;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
	} else {
		pgprot = sh ? SL_SHARED : 0;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		int i = 0;
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
				  FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
				  FL_SHARED | FL_NG | pgprot;
	}

	if (len == SZ_1M)
		*fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
					    FL_TYPE_SECT | FL_SHARED | pgprot;

	/* Need a 2nd level table */
	if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
		unsigned long *sl;
		sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
							get_order(SZ_4K));

		if (!sl) {
			pr_debug("Could not allocate second level table\n");
			ret = -ENOMEM;
			goto fail;
		}

		memset(sl, 0, SZ_4K);
		*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K)
		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
					  SL_SHARED | SL_TYPE_SMALL | pgprot;

	if (len == SZ_64K) {
		int i;

		for (i = 0; i < 16; i++)
			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
			    SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
	}

	ret = __flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			   int order)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	size_t len = 0x1000UL << order;
	int i, ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv) {
		ret = -ENODEV;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		ret = -ENODEV;
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M)
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

	if (len == SZ_1M)
		*fl_pte = 0;

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;
	}

	if (len == SZ_4K)
		*sl_pte = 0;

	/* Free the second-level table if it no longer has any live entries */
	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;
		}
	}

	ret = __flush_iotlb(domain);

	/*
	 * the IOMMU API requires us to return the order of the unmapped
	 * page (on success).
	 */
	if (!ret)
		ret = order;
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
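/*
 * Translate an IOVA to a physical address by asking the hardware:
 * issue a V2P (virtual-to-physical) probe on the first attached
 * context and read the translation result back from the PAR register.
 */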
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	unsigned long flags;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	__disable_clocks(iommu_drvdata);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
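/*
 * Context fault interrupt handler: scan every context bank on the
 * IOMMU that raised the interrupt, dump the fault registers for any
 * context with a pending fault, and clear its fault status bits.
 */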
"MULTI " : ""); 630 631 pr_err("FSYNR0 = %08x FSYNR1 = %08x\n", 632 GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx)); 633 pr_err("TTBR0 = %08x TTBR1 = %08x\n", 634 GET_TTBR0(base, ctx), GET_TTBR1(base, ctx)); 635 pr_err("SCTLR = %08x ACTLR = %08x\n", 636 GET_SCTLR(base, ctx), GET_ACTLR(base, ctx)); 637 pr_err("PRRR = %08x NMRR = %08x\n", 638 GET_PRRR(base, ctx), GET_NMRR(base, ctx)); 639 } 640 641 irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) 642 { 643 struct msm_iommu_drvdata *drvdata = dev_id; 644 void __iomem *base; 645 unsigned int fsr; 646 int i, ret; 647 648 spin_lock(&msm_iommu_lock); 649 650 if (!drvdata) { 651 pr_err("Invalid device ID in context interrupt handler\n"); 652 goto fail; 653 } 654 655 base = drvdata->base; 656 657 pr_err("Unexpected IOMMU page fault!\n"); 658 pr_err("base = %08x\n", (unsigned int) base); 659 660 ret = __enable_clocks(drvdata); 661 if (ret) 662 goto fail; 663 664 for (i = 0; i < drvdata->ncb; i++) { 665 fsr = GET_FSR(base, i); 666 if (fsr) { 667 pr_err("Fault occurred in context %d.\n", i); 668 pr_err("Interesting registers:\n"); 669 print_ctx_regs(base, i); 670 SET_FSR(base, i, 0x4000000F); 671 } 672 } 673 __disable_clocks(drvdata); 674 fail: 675 spin_unlock(&msm_iommu_lock); 676 return 0; 677 } 678 679 static struct iommu_ops msm_iommu_ops = { 680 .domain_init = msm_iommu_domain_init, 681 .domain_destroy = msm_iommu_domain_destroy, 682 .attach_dev = msm_iommu_attach_dev, 683 .detach_dev = msm_iommu_detach_dev, 684 .map = msm_iommu_map, 685 .unmap = msm_iommu_unmap, 686 .iova_to_phys = msm_iommu_iova_to_phys, 687 .domain_has_cap = msm_iommu_domain_has_cap 688 }; 689 690 static int __init get_tex_class(int icp, int ocp, int mt, int nos) 691 { 692 int i = 0; 693 unsigned int prrr = 0; 694 unsigned int nmrr = 0; 695 int c_icp, c_ocp, c_mt, c_nos; 696 697 RCP15_PRRR(prrr); 698 RCP15_NMRR(nmrr); 699 700 for (i = 0; i < NUM_TEX_CLASS; i++) { 701 c_nos = PRRR_NOS(prrr, i); 702 c_mt = PRRR_MT(prrr, i); 703 c_icp = NMRR_ICP(nmrr, i); 704 c_ocp = NMRR_OCP(nmrr, i); 705 706 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) 707 return i; 708 } 709 710 return -ENODEV; 711 } 712 713 static void __init setup_iommu_tex_classes(void) 714 { 715 msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] = 716 get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1); 717 718 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] = 719 get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1); 720 721 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] = 722 get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1); 723 724 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] = 725 get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1); 726 } 727 728 static int __init msm_iommu_init(void) 729 { 730 setup_iommu_tex_classes(); 731 bus_set_iommu(&platform_bus_type, &msm_iommu_ops); 732 return 0; 733 } 734 735 subsys_initcall(msm_iommu_init); 736 737 MODULE_LICENSE("GPL v2"); 738 MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>"); 739