// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
 * v5.0 introduced support for 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support the address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
 * value (0 or 4).
 */
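/*
 * For illustration (values are examples, not taken from hardware docs):
 * with PG_ENT_SHIFT == 4 (SysMMU v5+), a level-1 section entry of
 * 0x04210002 describes the 1MiB section at physical address 0x42100000,
 * while the same section on v1.x-v3.x hardware (PG_ENT_SHIFT == 0) would
 * be encoded as 0x42100002. The low two bits always carry the entry type
 * tested by the lv1ent_*/lv2ent_* macros above.
 */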
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_EAP		(1 << 2)
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define CTRL_VM_ENABLE			BIT(0)
#define CTRL_VM_FAULT_MODE_STALL	BIT(3)
#define CAPA0_CAPA1_EXIST		BIT(11)
#define CAPA1_VCR_ENABLED		BIT(14)

/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
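/*
 * Example (illustrative value): a REG_MMU_VERSION readout whose 11-bit raw
 * version field is 0x181 (0b011_0000001) decodes to major 3, minor 1, i.e.
 * MAKE_MMU_VER(3, 1).
 */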
/* v1.x - v3.x registers */
#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

/* v7.x registers */
#define REG_V7_CAPA0		0x870
#define REG_V7_CAPA1		0x874
#define REG_V7_CTRL_VM		0x8000

#define has_sysmmu(dev)		(dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

struct sysmmu_fault {
	sysmmu_iova_t addr;	/* IOVA address that caused fault */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault() */
};

struct sysmmu_v1_fault_info {
	unsigned short addr_reg; /* register to read IOVA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_v1_fault_info sysmmu_v1_faults[] = {
	{ REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
	{ REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

/* SysMMU v5 has the same faults for AR (0..4 bits) and AW (16..20 bits) */
static const char * const sysmmu_v5_fault_names[] = {
	"PTW",
	"PAGE",
	"MULTI-HIT",
	"ACCESS PROTECTION",
	"SECURITY PROTECTION"
};

static const char * const sysmmu_v7_fault_names[] = {
	"PTW",
	"PAGE",
	"ACCESS PROTECTION",
	"RESERVED"
};

/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree, which are bound to the given master device. It is
 * usually referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};
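/*
 * Note: a master device may be bound to more than one SYSMMU in the device
 * tree. In that case the owner's 'controllers' list holds several
 * sysmmu_drvdata entries and all of them are programmed and switched
 * together under rpm_lock (see exynos_iommu_attach_device()).
 */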
/*
 * This structure is an Exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices that
 * have been attached to this domain, as well as the page tables of the I/O
 * address space defined by it. It is usually referenced by the 'domain'
 * pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata;

/*
 * SysMMU version specific data. Contains offsets for the registers which can
 * be found in different SysMMU variants, but have different offset values.
 * Also contains version specific callbacks to abstract the hardware.
 */
struct sysmmu_variant {
	u32 pt_base;		/* page table base address (physical) */
	u32 flush_all;		/* invalidate all TLB entries */
	u32 flush_entry;	/* invalidate specific TLB entry */
	u32 flush_range;	/* invalidate TLB entries in specified range */
	u32 flush_start;	/* start address of range invalidation */
	u32 flush_end;		/* end address of range invalidation */
	u32 int_status;		/* interrupt status information */
	u32 int_clear;		/* clear the interrupt */
	u32 fault_va;		/* IOVA address that caused fault */
	u32 fault_info;		/* fault transaction info */

	int (*get_fault_info)(struct sysmmu_drvdata *data, unsigned int itype,
			      struct sysmmu_fault *fault);
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
	const struct sysmmu_variant *variant; /* version specific data */

	/* v7 fields */
	bool has_vcr;			/* virtual machine control register */
};

#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)

static int exynos_sysmmu_v1_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	const struct sysmmu_v1_fault_info *finfo;

	if (itype >= ARRAY_SIZE(sysmmu_v1_faults))
		return -ENXIO;

	finfo = &sysmmu_v1_faults[itype];
	fault->addr = readl(data->sfrbase + finfo->addr_reg);
	fault->name = finfo->name;
	fault->type = finfo->type;

	return 0;
}
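/*
 * Example (illustrative): on v1.x-v3.x hardware 'itype' is the bit number
 * found set in the interrupt status register, so itype == 1 selects the
 * "MULTI-HIT" read fault from sysmmu_v1_faults[] above and the faulting
 * IOVA is read from REG_AR_FAULT_ADDR.
 */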
static int exynos_sysmmu_v5_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	unsigned int addr_reg;

	if (itype < ARRAY_SIZE(sysmmu_v5_fault_names)) {
		fault->type = IOMMU_FAULT_READ;
		addr_reg = REG_V5_FAULT_AR_VA;
	} else if (itype >= 16 && itype <= 20) {
		fault->type = IOMMU_FAULT_WRITE;
		addr_reg = REG_V5_FAULT_AW_VA;
		itype -= 16;
	} else {
		return -ENXIO;
	}

	fault->name = sysmmu_v5_fault_names[itype];
	fault->addr = readl(data->sfrbase + addr_reg);

	return 0;
}

static int exynos_sysmmu_v7_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	u32 info = readl(SYSMMU_REG(data, fault_info));

	fault->addr = readl(SYSMMU_REG(data, fault_va));
	fault->name = sysmmu_v7_fault_names[itype % 4];
	fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	return 0;
}

/* SysMMU v1..v3 */
static const struct sysmmu_variant sysmmu_v1_variant = {
	.flush_all = 0x0c,
	.flush_entry = 0x10,
	.pt_base = 0x14,
	.int_status = 0x18,
	.int_clear = 0x1c,

	.get_fault_info = exynos_sysmmu_v1_get_fault_info,
};

/* SysMMU v5 */
static const struct sysmmu_variant sysmmu_v5_variant = {
	.pt_base = 0x0c,
	.flush_all = 0x10,
	.flush_entry = 0x14,
	.flush_range = 0x18,
	.flush_start = 0x20,
	.flush_end = 0x24,
	.int_status = 0x60,
	.int_clear = 0x64,

	.get_fault_info = exynos_sysmmu_v5_get_fault_info,
};

/* SysMMU v7: non-VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_variant = {
	.pt_base = 0x0c,
	.flush_all = 0x10,
	.flush_entry = 0x14,
	.flush_range = 0x18,
	.flush_start = 0x20,
	.flush_end = 0x24,
	.int_status = 0x60,
	.int_clear = 0x64,
	.fault_va = 0x70,
	.fault_info = 0x78,

	.get_fault_info = exynos_sysmmu_v7_get_fault_info,
};

/* SysMMU v7: VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_vm_variant = {
	.pt_base = 0x800c,
	.flush_all = 0x8010,
	.flush_entry = 0x8014,
	.flush_range = 0x8018,
	.flush_start = 0x8020,
	.flush_end = 0x8024,
	.int_status = 0x60,
	.int_clear = 0x64,
	.fault_va = 0x1000,
	.fault_info = 0x1004,

	.get_fault_info = exynos_sysmmu_v7_get_fault_info,
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	writel(0x1, SYSMMU_REG(data, flush_all));
}
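/*
 * For illustration: on v5+ hardware, flushing 16 pages starting at IOVA
 * 0x20000000 writes 0x20000000 to the flush_start register, 0x2000f000 to
 * flush_end and then kicks the range flush; v1.x-v3.x hardware (and the
 * single-page case) instead writes flush_entry once per page.
 */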
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       SYSMMU_REG(data, flush_entry));
			iova += SPAGE_SIZE;
		}
	} else {
		writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
		writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
		       SYSMMU_REG(data, flush_end));
		writel(0x1, SYSMMU_REG(data, flush_range));
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	u32 pt_base;

	if (MMU_MAJ_VER(data->version) < 5)
		pt_base = pgd;
	else
		pt_base = pgd >> SPAGE_ORDER;

	writel(pt_base, SYSMMU_REG(data, pt_base));
	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
{
	u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);

	return capa0 & CAPA0_CAPA1_EXIST;
}

static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
{
	u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);

	data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	if (MMU_MAJ_VER(data->version) < 5) {
		data->variant = &sysmmu_v1_variant;
	} else if (MMU_MAJ_VER(data->version) < 7) {
		data->variant = &sysmmu_v5_variant;
	} else {
		if (__sysmmu_has_capa1(data))
			__sysmmu_get_vcr(data);
		if (data->has_vcr)
			data->variant = &sysmmu_v7_vm_variant;
		else
			data->variant = &sysmmu_v7_variant;
	}

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault *fault)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n",
		dev_name(data->master),
		fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE",
		fault->name, fault->addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault->addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault->addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	struct sysmmu_drvdata *data = dev_id;
	unsigned int itype;
	struct sysmmu_fault fault;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	spin_lock(&data->lock);
	clk_enable(data->clk_master);

	itype = __ffs(readl(SYSMMU_REG(data, int_status)));
	ret = data->variant->get_fault_info(data, itype, &fault);
	if (ret) {
		dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype);
		goto out;
	}
	show_fault_information(data, &fault);

	if (data->domain) {
		ret = report_iommu_fault(&data->domain->domain, data->master,
					 fault.addr, fault.type);
	}
	if (ret)
		panic("Unrecoverable System MMU Fault!");

out:
	writel(1 << itype, SYSMMU_REG(data, int_clear));

	/* SysMMU is in blocked state when interrupt occurred */
	sysmmu_unblock(data);
	clk_disable(data->clk_master);
	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
{
	u32 ctrl;

	if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
		return;

	ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
	ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
	writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	__sysmmu_enable_vid(data);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for the
	 * short time needed to access the registers. For address translation
	 * during DMA transactions it relies on the client driver to keep the
	 * clock enabled.
	 */
	clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required:
		 *  4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 *  1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any set, while a 64KB page
		 * can be cached in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;
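/*
 * Clock requirements enforced by the probe below: either the single
 * "sysmmu" gate clock, or both "aclk" and "pclk", must be provided; the
 * "master" clock is always optional.
 */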
static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	__sysmmu_get_version(data);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	if (MMU_MAJ_VER(data->version) >= 5) {
		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
		if (ret) {
			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
			goto err_dma_set_mask;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		goto err_dma_set_mask;

	return 0;

err_dma_set_mask:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}
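/*
 * Note on the helper above: the lv1/lv2 tables are mapped DMA_TO_DEVICE
 * with dma_map_single() and the driver relies on dma address == physical
 * address (see the BUG_ON() in exynos_iommu_domain_alloc() below), so every
 * PTE update is bracketed by a sync_for_cpu/sync_for_device pair to make it
 * visible to the hardware page table walker.
 */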
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}
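/*
 * Attach/detach only touch the hardware when a SYSMMU is already
 * runtime-active (pm_runtime_get_noresume() + pm_runtime_active()); for a
 * suspended controller only the software state in sysmmu_drvdata is
 * updated and the registers are programmed later from
 * exynos_sysmmu_resume().
 */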
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}
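/*
 * Geometry reminder for the level-2 allocator below: each lv2 table has
 * NUM_LV2ENTRIES (256) four-byte entries, i.e. LV2TABLE_SIZE is 1KiB and
 * covers one 1MiB section of IOVA space; lv2entcnt[] tracks how many of
 * those 256 entries are still free in each section.
 */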
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
	sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If a pre-fetched SLPD is the faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table. This
		 * function replaces zero_l2_table with a new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may then cause a page fault, since
		 * the FLPD cache may still hold zero_l2_table for that area
		 * instead of the new L2 page table that contains its mapping
		 * information.
		 * Thus any replacement of zero_l2_table with another valid L2
		 * page table must involve FLPD cache invalidation on System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
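/*
 * For illustration: mapping a 64KiB large page through lv2set_page() writes
 * SPAGES_PER_LPAGE (16) identical level-2 entries under a single cache sync
 * pair and decrements the section's free-entry counter by 16; a 4KiB small
 * page writes one entry and decrements the counter by one.
 */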
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot, gfp_t gfp)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}
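/*
 * Note: the IOMMU core only calls map/unmap with one of the three page
 * sizes advertised in exynos_iommu_ops.pgsize_bitmap (4KiB, 64KiB, 1MiB);
 * the WARN_ON() size checks in exynos_iommu_unmap() below guard against an
 * unmap request smaller than the mapping actually found in the tables, in
 * which case 0 is returned.
 */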
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return ERR_PTR(-ENODEV);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}

	/* There is always at least one entry, see exynos_iommu_of_xlate() */
	data = list_first_entry(&owner->controllers,
				struct sysmmu_drvdata, owner_node);

	return &data->iommu;
}

static void exynos_iommu_release_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
#ifndef CONFIG_ARM
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
#endif
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}
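/*
 * Note: exynos_iommu_of_xlate() below is called once per SYSMMU phandle
 * referenced by a master device. The first call allocates the
 * exynos_iommu_owner structure; subsequent calls only append further
 * controllers to owner->controllers, and a controller that is already on
 * the list is silently accepted.
 */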
static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev_iommu_priv_set(dev, owner);
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.device_group = generic_device_group,
#ifdef CONFIG_ARM
	.set_platform_dma_ops = exynos_iommu_release_device,
#endif
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= exynos_iommu_attach_device,
		.map		= exynos_iommu_map,
		.unmap		= exynos_iommu_unmap,
		.iova_to_phys	= exynos_iommu_iova_to_phys,
		.free		= exynos_iommu_domain_free,
	}
};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	return 0;
err_reg_driver:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);
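/*
 * exynos_iommu_init() runs at core_initcall() level, i.e. before regular
 * device/module initcalls; when the device tree contains no
 * "samsung,exynos-sysmmu" node it bails out early without registering
 * anything.
 */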