/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include <plat/iopgtable.h>

#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	spinlock_t lock;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * omap_install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);

/**
 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @obj:	target iommu
 **/
void omap_iommu_save_ctx(struct omap_iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:	target iommu
 **/
void omap_iommu_restore_ctx(struct omap_iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);

static int iommu_enable(struct omap_iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 *	TLB operations
 */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

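/*
 * The MMU_LOCK register drives both TLB loads and TLB reads: 'base' is the
 * number of entries at the bottom of the TLB that are preserved (locked
 * against replacement), and 'vict' is the victim index targeted by the next
 * load and selected by the next read.  The iteration helper below walks the
 * TLB by reprogramming 'vict' before each read.
 */
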
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

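/*
 * Without PREFETCH_IOTLB, load_iotlb_entry() above is a stub, so
 * prefetch_iotlb_entry() is effectively a no-op and TLB entries are only
 * created by the MMU's hardware table walker on a miss, never preloaded
 * from software.
 */
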
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}

#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);

static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);

int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* an L2 table has already been allocated */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

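/*
 * iopgtable_store_entry_core() below dispatches on the MMU page size of the
 * entry being stored:
 *
 *	MMU_CAM_PGSZ_16M -> iopgd_alloc_super()   (16 consecutive L1 entries)
 *	MMU_CAM_PGSZ_1M  -> iopgd_alloc_section() (a single L1 entry)
 *	MMU_CAM_PGSZ_64K -> iopte_alloc_large()   (16 consecutive L2 entries)
 *	MMU_CAM_PGSZ_4K  -> iopte_alloc_page()    (a single L2 entry)
 */
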
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_find_iommu_device() - find an omap iommu device by name
 * @name:	name of the iommu device
 *
 * The generic iommu API requires the caller to provide the device
 * it wishes to attach to a certain iommu domain.
 *
 * Drivers generally should not bother with this as it should just
 * be taken care of by the DMA-API using dev_archdata.
 *
 * This function is provided as an interim solution until the latter
 * materializes, and omap3isp is fully migrated to the DMA-API.
 */
struct device *omap_find_iommu_device(const char *name)
{
	return driver_find_device(&omap_iommu_driver.driver, NULL,
				  (void *)name,
				  device_match_by_alias);
}
EXPORT_SYMBOL_GPL(omap_find_iommu_device);

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @dev:	target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
{
	int err = -ENOMEM;
	struct omap_iommu *obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner)) {
		err = -ENODEV;
		goto err_module;
	}

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

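/*
 * Illustrative usage sketch (not part of this driver): a client such as
 * omap3isp is expected to reach omap_iommu_attach()/omap_iommu_detach()
 * through the generic IOMMU API rather than call them directly, roughly
 * along these lines ("isp" is only an example name from platform data):
 *
 *	struct device *dev = omap_find_iommu_device("isp");
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(domain, dev);
 *	...
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */
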
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, int order, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret, flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %d\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			    int order)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t unmap_size;

	dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);

	unmap_size = iopgtable_clear_entry(oiommu, da);

	return unmap_size ? get_order(unmap_size) : -EINVAL;
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = oiommu;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = to_iommu(dev);

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		goto out;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = NULL;

out:
	spin_unlock(&omap_domain->lock);
}

static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x", *pte);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x", *pgd);
	}

	return ret;
}

static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");