// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "dma-iommu.h"
#include "iommu-pages.h"

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
static struct iommu_domain exynos_identity_domain;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
 * v5.0 introduced support for a 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the
 * proper value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

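/*
 * Worked example (illustrative values): with SYSMMU_V5_PG_ENT_SHIFT == 4, a
 * page table entry for physical address 0x1_2345_0000 stores
 * (0x123450000 >> 4) == 0x12345000 plus the protection/type bits, and
 * sect_to_phys() below shifts the stored value left by PG_ENT_SHIFT to
 * recover the physical address.
 */
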
static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

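/*
 * Worked example (illustrative): IOVA 0x12345678 decomposes into
 * lv1ent_offset() == 0x123 (index of the 1MiB section in the 16KiB first
 * level table), lv2ent_offset() == 0x45 (index of the 4KiB page inside that
 * section) and spage_offs() == 0x678 (offset within the page), so a 4KiB
 * mapping at that IOVA lives at entry 0x45 of the second-level table linked
 * from pgtable[0x123].
 */
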
#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_EAP		(1 << 2)
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define CTRL_VM_ENABLE			BIT(0)
#define CTRL_VM_FAULT_MODE_STALL	BIT(3)
#define CAPA0_CAPA1_EXIST		BIT(11)
#define CAPA1_VCR_ENABLED		BIT(14)

/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))

/* v1.x - v3.x registers */
#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

/* v7.x registers */
#define REG_V7_CAPA0		0x870
#define REG_V7_CAPA1		0x874
#define REG_V7_CTRL_VM		0x8000

#define has_sysmmu(dev)		(dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

struct sysmmu_fault {
	sysmmu_iova_t addr;	/* IOVA address that caused fault */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault() */
};

struct sysmmu_v1_fault_info {
	unsigned short addr_reg; /* register to read IOVA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

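/*
 * The interrupt handler converts the lowest set bit of the int_status
 * register into an index (itype) into this table; e.g. if only bit 3 is
 * set, the fault is reported as "BUS ERROR" and the faulting IOVA is read
 * from REG_DEFAULT_SLAVE_ADDR.
 */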
static const struct sysmmu_v1_fault_info sysmmu_v1_faults[] = {
	{ REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
	{ REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

/* SysMMU v5 has the same faults for AR (0..4 bits) and AW (16..20 bits) */
static const char * const sysmmu_v5_fault_names[] = {
	"PTW",
	"PAGE",
	"MULTI-HIT",
	"ACCESS PROTECTION",
	"SECURITY PROTECTION"
};

static const char * const sysmmu_v7_fault_names[] = {
	"PTW",
	"PAGE",
	"ACCESS PROTECTION",
	"RESERVED"
};

/*
 * This structure is attached to dev->iommu->priv of the master device on
 * device add. It contains a list of the SYSMMU controllers defined by the
 * device tree which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is an Exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices which
 * have been attached to this domain, and the page tables of the IO address
 * space it defines. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata;

/*
 * SysMMU version specific data. Contains offsets for the registers which can
 * be found in different SysMMU variants, but have different offset values.
 * Also contains version specific callbacks to abstract the hardware.
 */
struct sysmmu_variant {
	u32 pt_base;		/* page table base address (physical) */
	u32 flush_all;		/* invalidate all TLB entries */
	u32 flush_entry;	/* invalidate specific TLB entry */
	u32 flush_range;	/* invalidate TLB entries in specified range */
	u32 flush_start;	/* start address of range invalidation */
	u32 flush_end;		/* end address of range invalidation */
	u32 int_status;		/* interrupt status information */
	u32 int_clear;		/* clear the interrupt */
	u32 fault_va;		/* IOVA address that caused fault */
	u32 fault_info;		/* fault transaction info */

	int (*get_fault_info)(struct sysmmu_drvdata *data, unsigned int itype,
			      struct sysmmu_fault *fault);
};

/*
 * This structure holds all data of a single SYSMMU controller: hw resources
 * like registers and clocks, pointers and list nodes connecting it to all
 * other structures, internal state and parameters read from the device tree.
 * It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
	const struct sysmmu_variant *variant; /* version specific data */

	/* v7 fields */
	bool has_vcr;			/* virtual machine control register */
};

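/*
 * Rough sketch of how the structures defined above relate at runtime (see
 * their individual comments for details): a master device's
 * exynos_iommu_owner lists one sysmmu_drvdata per SYSMMU controller bound
 * to it; each sysmmu_drvdata points at the exynos_iommu_domain it is
 * attached to, and the domain keeps the same sysmmu_drvdata structures on
 * its clients list together with the page tables it owns.
 */
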
#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)

static int exynos_sysmmu_v1_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	const struct sysmmu_v1_fault_info *finfo;

	if (itype >= ARRAY_SIZE(sysmmu_v1_faults))
		return -ENXIO;

	finfo = &sysmmu_v1_faults[itype];
	fault->addr = readl(data->sfrbase + finfo->addr_reg);
	fault->name = finfo->name;
	fault->type = finfo->type;

	return 0;
}

static int exynos_sysmmu_v5_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	unsigned int addr_reg;

	if (itype < ARRAY_SIZE(sysmmu_v5_fault_names)) {
		fault->type = IOMMU_FAULT_READ;
		addr_reg = REG_V5_FAULT_AR_VA;
	} else if (itype >= 16 && itype <= 20) {
		fault->type = IOMMU_FAULT_WRITE;
		addr_reg = REG_V5_FAULT_AW_VA;
		itype -= 16;
	} else {
		return -ENXIO;
	}

	fault->name = sysmmu_v5_fault_names[itype];
	fault->addr = readl(data->sfrbase + addr_reg);

	return 0;
}

static int exynos_sysmmu_v7_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	u32 info = readl(SYSMMU_REG(data, fault_info));

	fault->addr = readl(SYSMMU_REG(data, fault_va));
	fault->name = sysmmu_v7_fault_names[itype % 4];
	fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	return 0;
}

/* SysMMU v1..v3 */
static const struct sysmmu_variant sysmmu_v1_variant = {
	.flush_all	= 0x0c,
	.flush_entry	= 0x10,
	.pt_base	= 0x14,
	.int_status	= 0x18,
	.int_clear	= 0x1c,

	.get_fault_info	= exynos_sysmmu_v1_get_fault_info,
};

/* SysMMU v5 */
static const struct sysmmu_variant sysmmu_v5_variant = {
	.pt_base	= 0x0c,
	.flush_all	= 0x10,
	.flush_entry	= 0x14,
	.flush_range	= 0x18,
	.flush_start	= 0x20,
	.flush_end	= 0x24,
	.int_status	= 0x60,
	.int_clear	= 0x64,

	.get_fault_info	= exynos_sysmmu_v5_get_fault_info,
};

/* SysMMU v7: non-VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_variant = {
	.pt_base	= 0x0c,
	.flush_all	= 0x10,
	.flush_entry	= 0x14,
	.flush_range	= 0x18,
	.flush_start	= 0x20,
	.flush_end	= 0x24,
	.int_status	= 0x60,
	.int_clear	= 0x64,
	.fault_va	= 0x70,
	.fault_info	= 0x78,

	.get_fault_info	= exynos_sysmmu_v7_get_fault_info,
};

/* SysMMU v7: VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_vm_variant = {
	.pt_base	= 0x800c,
	.flush_all	= 0x8010,
	.flush_entry	= 0x8014,
	.flush_range	= 0x8018,
	.flush_start	= 0x8020,
	.flush_end	= 0x8024,
	.int_status	= 0x60,
	.int_clear	= 0x64,
	.fault_va	= 0x1000,
	.fault_info	= 0x1004,

	.get_fault_info	= exynos_sysmmu_v7_get_fault_info,
};

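/*
 * Example (illustrative): SYSMMU_REG(data, flush_all) resolves to
 * data->sfrbase + 0x10 for a controller using sysmmu_v5_variant, and to
 * data->sfrbase + 0x8010 for the v7 VM-capable layout above.
 */
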
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	writel(0x1, SYSMMU_REG(data, flush_all));
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       SYSMMU_REG(data, flush_entry));
			iova += SPAGE_SIZE;
		}
	} else {
		writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
		writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
		       SYSMMU_REG(data, flush_end));
		writel(0x1, SYSMMU_REG(data, flush_range));
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	u32 pt_base;

	if (MMU_MAJ_VER(data->version) < 5)
		pt_base = pgd;
	else
		pt_base = pgd >> SPAGE_ORDER;

	writel(pt_base, SYSMMU_REG(data, pt_base));
	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
{
	u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);

	return capa0 & CAPA0_CAPA1_EXIST;
}

static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
{
	u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);

	data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	if (MMU_MAJ_VER(data->version) < 5) {
		data->variant = &sysmmu_v1_variant;
	} else if (MMU_MAJ_VER(data->version) < 7) {
		data->variant = &sysmmu_v5_variant;
	} else {
		if (__sysmmu_has_capa1(data))
			__sysmmu_get_vcr(data);
		if (data->has_vcr)
			data->variant = &sysmmu_v7_vm_variant;
		else
			data->variant = &sysmmu_v7_variant;
	}

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault *fault)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n",
		dev_name(data->master),
		fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE",
		fault->name, fault->addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault->addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault->addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	struct sysmmu_drvdata *data = dev_id;
	unsigned int itype;
	struct sysmmu_fault fault;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	spin_lock(&data->lock);
	clk_enable(data->clk_master);

	itype = __ffs(readl(SYSMMU_REG(data, int_status)));
	ret = data->variant->get_fault_info(data, itype, &fault);
	if (ret) {
		dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype);
		goto out;
	}
	show_fault_information(data, &fault);

	if (data->domain) {
		ret = report_iommu_fault(&data->domain->domain, data->master,
					 fault.addr, fault.type);
	}
	if (ret)
		panic("Unrecoverable System MMU Fault!");

out:
	writel(1 << itype, SYSMMU_REG(data, int_clear));

	/* SysMMU is in blocked state when interrupt occurred */
	sysmmu_unblock(data);
	clk_disable(data->clk_master);
	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
{
	u32 ctrl;

	if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
		return;

	ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
	ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
	writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	__sysmmu_enable_vid(data);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for the
	 * short time needed to access the registers. For address translation
	 * performed during a DMA transaction it relies on the client driver
	 * to keep that clock enabled.
	 */
	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the sets, while a
		 * 64KB page can be in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get_optional(dev, "sysmmu");
	if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get_optional(dev, "aclk");
	if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get_optional(dev, "pclk");
	if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get_optional(dev, "master");
	if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	__sysmmu_get_version(data);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

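	/*
	 * v5.0+ page table entries can hold 36-bit physical addresses (see
	 * the PG_ENT_SHIFT comment near the top of this file), so widen the
	 * DMA mask accordingly; this device may also be used as dma_dev for
	 * page table maintenance below.
	 */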
	if (MMU_MAJ_VER(data->version) >= 5) {
		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
		if (ret) {
			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
			goto err_dma_set_mask;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		goto err_dma_set_mask;

	return 0;

err_dma_set_mask:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

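/*
 * Page table entries are 32-bit little-endian values fetched by the SYSMMU
 * directly from memory; the dma_sync_single_for_*() calls around the store
 * below perform the CPU cache maintenance needed for the hardware walker to
 * observe the update (the tables are mapped with dma addr == phys, see
 * exynos_iommu_domain_alloc_paging()).
 */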
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = iommu_alloc_pages_sz(GFP_KERNEL, SZ_16K);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = iommu_alloc_pages_sz(GFP_KERNEL, SZ_8K);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE;

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	iommu_free_pages(domain->lv2entcnt);
err_counter:
	iommu_free_pages(domain->pgtable);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	iommu_free_pages(domain->pgtable);
	iommu_free_pages(domain->lv2entcnt);
	kfree(domain);
}

static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
					struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct exynos_iommu_domain *domain;
	phys_addr_t pagetable;
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (owner->domain == identity_domain)
		return 0;

	domain = to_exynos_domain(owner->domain);
	pagetable = virt_to_phys(domain->pgtable);

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = identity_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Restored IOMMU to IDENTITY from pgtable %pa\n",
		__func__, &pagetable);
	return 0;
}

static struct iommu_domain_ops exynos_identity_ops = {
	.attach_dev = exynos_iommu_identity_attach,
};

static struct iommu_domain exynos_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &exynos_identity_ops,
};

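/*
 * Attaching a paging domain always goes through the identity domain first
 * (see the exynos_iommu_identity_attach() call below); after that every
 * SYSMMU bound to the master is pointed at the new page table and enabled
 * if it is currently runtime active.
 */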
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int err;

	err = exynos_iommu_identity_attach(&exynos_identity_domain, dev);
	if (err)
		return err;

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

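/*
 * Second-level tables are 1KiB (256 four-byte entries) and are allocated
 * lazily from lv2table_kmem_cache, which is created in exynos_iommu_init()
 * with LV2TABLE_SIZE alignment; the BUG_ON below guards that alignment
 * assumption.
 */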
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
				    sysmmu_pte_t *sent, sysmmu_iova_t iova,
				    short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If the pre-fetched SLPD is the faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table. This
		 * function replaces zero_l2_table with a new L2 page table
		 * in order to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid area
		 * instead of the new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid L2
		 * page table must involve FLPD cache invalidation on System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic that improves address translation
 * performance by caching more page table entries per page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry has been updated to a valid entry after it was
 * cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should apply the following
 * workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of an I/O virtual region must be aligned to
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB
 *   (for example, a region may start at 0x10020000 only if the previous
 *   region ends at or below 0x10000000).
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    size_t count, int prot, gfp_t gfp, size_t *mapped)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);
	else
		*mapped = size;

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size, size_t count,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return ERR_PTR(-ENODEV);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}

	/* There is always at least one entry, see exynos_iommu_of_xlate() */
	data = list_first_entry(&owner->controllers,
				struct sysmmu_drvdata, owner_node);

	return &data->iommu;
}

static void exynos_iommu_release_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	WARN_ON(exynos_iommu_identity_attach(&exynos_identity_domain, dev));

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}

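/*
 * Illustrative device tree fragment (node names are hypothetical; only the
 * "samsung,exynos-sysmmu" compatible and the "iommus" reference come from
 * this driver's binding):
 *
 *	sysmmu_foo: sysmmu@12345000 {
 *		compatible = "samsung,exynos-sysmmu";
 *		...
 *	};
 *
 *	foo_master {
 *		iommus = <&sysmmu_foo>;
 *	};
 *
 * The IOMMU core then calls exynos_iommu_of_xlate() below once for each
 * referenced controller, which builds the owner's controllers list.
 */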
static int exynos_iommu_of_xlate(struct device *dev,
				 const struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		owner->domain = &exynos_identity_domain;
		dev_iommu_priv_set(dev, owner);
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}

static const struct iommu_ops exynos_iommu_ops = {
	.identity_domain = &exynos_identity_domain,
	.domain_alloc_paging = exynos_iommu_domain_alloc_paging,
	.device_group = generic_device_group,
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.get_resv_regions = iommu_dma_get_resv_regions,
	.of_xlate = exynos_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = exynos_iommu_attach_device,
		.map_pages = exynos_iommu_map,
		.unmap_pages = exynos_iommu_unmap,
		.iova_to_phys = exynos_iommu_iova_to_phys,
		.free = exynos_iommu_domain_free,
	}
};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	return 0;
err_reg_driver:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);