// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "dma-iommu.h"
#include "iommu-pages.h"

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
static struct iommu_domain exynos_identity_domain;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

/*
 * Level-1 (section) entry type predicates. The low two bits of an entry
 * encode its type; a link to the shared all-zero lv2 table (ZERO_LV2LINK)
 * is treated as "fault" even though its type bits say "page".
 */
#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

/* Level-2 entry type predicates (small = 4KiB, large = 64KiB). */
#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
 * v5.0 introduced support for 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support the address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
 * value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

/*
 * Protection bit values, indexed by the (IOMMU_READ | IOMMU_WRITE) subset of
 * the prot mask (0..3). LV1_PROT/LV2_PROT are pointed at the table matching
 * the hardware generation on first probe.
 */
static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

/* Convert a page table entry to a physical address (v5+ shifts by 4). */
#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

/* Index of the level-1 (section) entry covering @iova. */
static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

/* Index of the level-2 entry for @iova within its section's lv2 table. */
static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

/* Build a page table entry of each type from physical address + prot index. */
#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

/* REG_MMU_CTRL values */
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

/* REG_MMU_CFG bits */
#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

/* v7 VM-mode control/capability bits */
#define CTRL_VM_ENABLE BIT(0)
#define CTRL_VM_FAULT_MODE_STALL BIT(3)
#define CAPA0_CAPA1_EXIST BIT(11)
#define CAPA1_VCR_ENABLED BIT(14)

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))

/* v1.x - v3.x registers */
#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

/* v7.x registers */
#define REG_V7_CAPA0 0x870
#define REG_V7_CAPA1 0x874
#define REG_V7_CTRL_VM 0x8000

#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)

/* first probed SYSMMU device; used for DMA API page-table cache maintenance */
static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
/* shared all-zero lv2 table every unused lv1 entry points at (v3.3 quirk) */
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

/* Pointer to the level-1 entry covering @iova in @pgtable. */
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

/* Pointer to the level-2 entry for @iova; @sent must be a page-type lv1 entry. */
static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

struct sysmmu_fault {
	sysmmu_iova_t addr;	/* IOVA address that caused fault */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault() */
};

struct sysmmu_v1_fault_info {
	unsigned short addr_reg; /* register to read IOVA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

/* v1..v3 fault descriptors, indexed by interrupt status bit number */
static const struct sysmmu_v1_fault_info sysmmu_v1_faults[] = {
	{ REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
	{ REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

/* SysMMU v5 has the same faults for AR (0..4 bits) and AW (16..20 bits) */
static const char * const sysmmu_v5_fault_names[] = {
	"PTW",
	"PAGE",
	"MULTI-HIT",
	"ACCESS PROTECTION",
	"SECURITY PROTECTION"
};

static const char * const
sysmmu_v7_fault_names[] = {
	"PTW",
	"PAGE",
	"ACCESS PROTECTION",
	"RESERVED"
};

/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add, contains a list of SYSMMU controllers defined by device tree,
 * which are bound to given master device. It is usually referenced by 'owner'
 * pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure exynos specific generalization of struct iommu_domain.
 * It contains list of SYSMMU controllers from all master devices, which has
 * been attached to this domain and page tables of IO address space defined by
 * it. It is usually referenced by 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients;	/* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;		/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata;

/*
 * SysMMU version specific data. Contains offsets for the registers which can
 * be found in different SysMMU variants, but have different offset values.
 * Also contains version specific callbacks to abstract the hardware.
 */
struct sysmmu_variant {
	u32 pt_base;		/* page table base address (physical) */
	u32 flush_all;		/* invalidate all TLB entries */
	u32 flush_entry;	/* invalidate specific TLB entry */
	u32 flush_range;	/* invalidate TLB entries in specified range */
	u32 flush_start;	/* start address of range invalidation */
	u32 flush_end;		/* end address of range invalidation */
	u32 int_status;		/* interrupt status information */
	u32 int_clear;		/* clear the interrupt */
	u32 fault_va;		/* IOVA address that caused fault */
	u32 fault_info;		/* fault transaction info */

	int (*get_fault_info)(struct sysmmu_drvdata *data, unsigned int itype,
			      struct sysmmu_fault *fault);
};

/*
 * This structure hold all data of a single SYSMMU controller, this includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from device
 * tree. It is usually referenced by 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
	const struct sysmmu_variant *variant; /* version specific data */

	/* v7 fields */
	bool has_vcr;			/* virtual machine control register */
};

/* Address of a variant-specific register of @data's controller. */
#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)

/* v1..v3: the interrupt bit index directly selects a fault descriptor. */
static int exynos_sysmmu_v1_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	const struct sysmmu_v1_fault_info *finfo;

	if (itype >= ARRAY_SIZE(sysmmu_v1_faults))
		return -ENXIO;

	finfo = &sysmmu_v1_faults[itype];
	fault->addr = readl(data->sfrbase + finfo->addr_reg);
	fault->name = finfo->name;
	fault->type = finfo->type;

	return 0;
}

/* v5: bits 0..4 are AR (read) faults, bits 16..20 the matching AW (write). */
static int exynos_sysmmu_v5_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	unsigned int addr_reg;

	if (itype < ARRAY_SIZE(sysmmu_v5_fault_names)) {
		fault->type = IOMMU_FAULT_READ;
		addr_reg = REG_V5_FAULT_AR_VA;
	} else if (itype >= 16 && itype <= 20) {
		fault->type = IOMMU_FAULT_WRITE;
		addr_reg = REG_V5_FAULT_AW_VA;
		itype -= 16;	/* map AW bit back into the shared name table */
	} else {
		return -ENXIO;
	}

	fault->name = sysmmu_v5_fault_names[itype];
	fault->addr = readl(data->sfrbase + addr_reg);

	return 0;
}

/* v7: read/write direction comes from bit 20 of the fault_info register. */
static int exynos_sysmmu_v7_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	u32 info = readl(SYSMMU_REG(data, fault_info));

	fault->addr = readl(SYSMMU_REG(data, fault_va));
	fault->name = sysmmu_v7_fault_names[itype % 4];
	fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	return 0;
}

/* SysMMU v1..v3 */
static const struct sysmmu_variant sysmmu_v1_variant = {
	.flush_all	= 0x0c,
	.flush_entry	= 0x10,
	.pt_base	= 0x14,
	.int_status	= 0x18,
	.int_clear	= 0x1c,

	.get_fault_info	= exynos_sysmmu_v1_get_fault_info,
};

/* SysMMU v5 */
static const struct sysmmu_variant sysmmu_v5_variant = {
	.pt_base	= 0x0c,
	.flush_all	= 0x10,
	.flush_entry	= 0x14,
	.flush_range	= 0x18,
	.flush_start	= 0x20,
	.flush_end	= 0x24,
	.int_status	= 0x60,
	.int_clear	= 0x64,

	.get_fault_info	= exynos_sysmmu_v5_get_fault_info,
};

/* SysMMU v7: non-VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_variant = {
	.pt_base	= 0x0c,
	.flush_all	= 0x10,
	.flush_entry	= 0x14,
	.flush_range	= 0x18,
	.flush_start	= 0x20,
	.flush_end	= 0x24,
	.int_status	= 0x60,
	.int_clear	= 0x64,
	.fault_va	= 0x70,
	.fault_info	= 0x78,

	.get_fault_info	= exynos_sysmmu_v7_get_fault_info,
};

/* SysMMU v7: VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_vm_variant = {
	.pt_base	= 0x800c,
	.flush_all	= 0x8010,
	.flush_entry	= 0x8014,
	.flush_range	= 0x8018,
	.flush_start	= 0x8020,
	.flush_end	= 0x8024,
	.int_status	= 0x60,
	.int_clear	= 0x64,
	.fault_va	= 0x1000,
	.fault_info	= 0x1004,

	.get_fault_info	= exynos_sysmmu_v7_get_fault_info,
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

/* Let the SysMMU resume address translation after a sysmmu_block(). */
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

/*
 * Stall address translation so registers can be updated safely.
 * Returns false (and re-enables translation) if the controller does not
 * report the blocked state within the polling budget.
 */
static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

/* Invalidate the whole TLB. */
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	writel(0x1, SYSMMU_REG(data, flush_all));
}

/*
 * Invalidate @num_inv page-sized TLB entries starting at @iova.
 * Pre-v5 hardware only supports per-entry flush; v5+ uses the range
 * flush registers when more than one entry is affected.
 */
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       SYSMMU_REG(data, flush_entry));
			iova += SPAGE_SIZE;
		}
	} else {
		writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
		writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
		       SYSMMU_REG(data, flush_end));
		writel(0x1, SYSMMU_REG(data, flush_range));
	}
}

/* Program the page table base; v5+ expects it shifted right by SPAGE_ORDER. */
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	u32 pt_base;

	if (MMU_MAJ_VER(data->version) < 5)
		pt_base = pgd;
	else
		pt_base = pgd >> SPAGE_ORDER;

	writel(pt_base, SYSMMU_REG(data, pt_base));
	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

/* Disable in reverse order of __sysmmu_enable_clocks(). */
static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

/* v7: does the controller expose the CAPA1 capability register? */
static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
{
	u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);

	return capa0 & CAPA0_CAPA1_EXIST;
}

/* v7: latch whether the VM control register (VCR) is available. */
static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
{
	u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);

	data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
}

/* Read the hardware version and select the matching register-layout variant. */
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	if (MMU_MAJ_VER(data->version) < 5) {
		data->variant = &sysmmu_v1_variant;
	} else if (MMU_MAJ_VER(data->version) < 7) {
		data->variant = &sysmmu_v5_variant;
	} else {
		if (__sysmmu_has_capa1(data))
			__sysmmu_get_vcr(data);
		if (data->has_vcr)
			data->variant = &sysmmu_v7_vm_variant;
		else
			data->variant = &sysmmu_v7_variant;
	}

	__sysmmu_disable_clocks(data);
}

/* Log a decoded fault plus the page table entries that cover its address. */
static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault *fault)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n",
		dev_name(data->master),
		fault->type == IOMMU_FAULT_READ ?
			"READ" : "WRITE",
		fault->name, fault->addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault->addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault->addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

/*
 * Fault interrupt handler: decode the fault via the variant callback,
 * report it to the IOMMU core, then clear the interrupt and unblock.
 * Panics if nobody handled the fault (translation cannot continue).
 */
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	struct sysmmu_drvdata *data = dev_id;
	unsigned int itype;
	struct sysmmu_fault fault;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	spin_lock(&data->lock);
	clk_enable(data->clk_master);

	itype = __ffs(readl(SYSMMU_REG(data, int_status)));
	ret = data->variant->get_fault_info(data, itype, &fault);
	if (ret) {
		dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype);
		goto out;
	}
	show_fault_information(data, &fault);

	if (data->domain) {
		ret = report_iommu_fault(&data->domain->domain, data->master,
					 fault.addr, fault.type);
	}
	if (ret)
		panic("Unrecoverable System MMU Fault!");

out:
	writel(1 << itype, SYSMMU_REG(data, int_clear));

	/* SysMMU is in blocked state when interrupt occurred */
	sysmmu_unblock(data);
	clk_disable(data->clk_master);
	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

/* Turn translation off and gate the controller's clocks. */
static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

/* Program REG_MMU_CFG according to the feature set of this hw revision. */
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

/* v7 with VCR: enable VM mode with stall-on-fault. No-op otherwise. */
static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
{
	u32 ctrl;

	if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
		return;

	ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
	ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
	writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
}

/* Configure the controller and start translating with data->pgtable. */
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	__sysmmu_enable_vid(data);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * SYSMMU driver keeps master's clock enabled only for the short
	 * time, while accessing the registers. For performing address
	 * translation during DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}

/*
 * Invalidate a possibly stale first-level-PD cache entry for @iova.
 * Only relevant for v3.3+ hardware, which caches lv1 entries (FLPD cache).
 */
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

/* Invalidate the TLB entries covering [@iova, @iova + @size). */
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

/*
 * Probe one SYSMMU controller: map registers, hook the fault interrupt,
 * acquire clocks, detect the hardware revision and register with the
 * IOMMU core. Note: either "sysmmu" or both "aclk" and "pclk" clocks are
 * required; all clock handles are optional individually.
 */
static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get_optional(dev, "sysmmu");
	if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get_optional(dev, "aclk");
	if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get_optional(dev, "pclk");
	if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get_optional(dev, "master");
	if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	__sysmmu_get_version(data);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	platform_set_drvdata(pdev,
			     data);

	/* first probed controller decides the PTE format for the whole system */
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	if (MMU_MAJ_VER(data->version) >= 5) {
		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
		if (ret) {
			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
			goto err_dma_set_mask;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		goto err_dma_set_mask;

	return 0;

err_dma_set_mask:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

/* Runtime suspend: stop translation if a domain is attached. */
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

/* Runtime resume: re-enable translation with the previously set page table. */
static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

/* Write a PTE with the CPU cache maintenance the hardware walker requires. */
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

/* Allocate a paging domain: 16 KiB lv1 table plus per-section lv2 counters. */
static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = iommu_alloc_pages_sz(GFP_KERNEL, SZ_16K);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = iommu_alloc_pages_sz(GFP_KERNEL, SZ_8K);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE;

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	iommu_free_pages(domain->lv2entcnt);
err_counter:
	iommu_free_pages(domain->pgtable);
err_pgtable:
	kfree(domain);
	return NULL;
}

/* Detach remaining controllers, then release all lv2 tables and the domain. */
static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	/* free every second-level table still linked from the lv1 table */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	iommu_free_pages(domain->pgtable);
	iommu_free_pages(domain->lv2entcnt);
	kfree(domain);
}

/*
 * Switch the master device back to the identity (pass-through) domain:
 * disable its controllers and unlink them from the old paging domain.
 */
static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
					struct device *dev,
					struct iommu_domain *old)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct exynos_iommu_domain *domain;
	phys_addr_t pagetable;
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (owner->domain == identity_domain)
		return 0;

	domain = to_exynos_domain(owner->domain);
	pagetable = virt_to_phys(domain->pgtable);

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = identity_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Restored IOMMU to IDENTITY from pgtable %pa\n",
		__func__, &pagetable);
	return 0;
}

static struct iommu_domain_ops exynos_identity_ops = {
	.attach_dev = exynos_iommu_identity_attach,
};

static struct iommu_domain exynos_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &exynos_identity_ops,
};

/*
 * Attach the master device to a paging domain: first detach to identity,
 * then point every controller of the master at the domain's page table
 * and enable the ones that are runtime-active.
 */
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev,
				      struct iommu_domain *old)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int err;

	err = exynos_iommu_identity_attach(&exynos_identity_domain, dev, old);
	if (err)
		return err;

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
list_add_tail(&data->domain_node, &domain->clients); 1061 spin_unlock(&data->lock); 1062 } 1063 owner->domain = iommu_domain; 1064 spin_unlock_irqrestore(&domain->lock, flags); 1065 1066 list_for_each_entry(data, &owner->controllers, owner_node) { 1067 pm_runtime_get_noresume(data->sysmmu); 1068 if (pm_runtime_active(data->sysmmu)) 1069 __sysmmu_enable(data); 1070 pm_runtime_put(data->sysmmu); 1071 } 1072 1073 mutex_unlock(&owner->rpm_lock); 1074 1075 dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__, 1076 &pagetable); 1077 1078 return 0; 1079 } 1080 1081 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, 1082 sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter) 1083 { 1084 if (lv1ent_section(sent)) { 1085 WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova); 1086 return ERR_PTR(-EADDRINUSE); 1087 } 1088 1089 if (lv1ent_fault(sent)) { 1090 dma_addr_t handle; 1091 sysmmu_pte_t *pent; 1092 bool need_flush_flpd_cache = lv1ent_zero(sent); 1093 1094 pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC); 1095 BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1)); 1096 if (!pent) 1097 return ERR_PTR(-ENOMEM); 1098 1099 exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); 1100 kmemleak_ignore(pent); 1101 *pgcounter = NUM_LV2ENTRIES; 1102 handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE, 1103 DMA_TO_DEVICE); 1104 if (dma_mapping_error(dma_dev, handle)) { 1105 kmem_cache_free(lv2table_kmem_cache, pent); 1106 return ERR_PTR(-EADDRINUSE); 1107 } 1108 1109 /* 1110 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, 1111 * FLPD cache may cache the address of zero_l2_table. This 1112 * function replaces the zero_l2_table with new L2 page table 1113 * to write valid mappings. 1114 * Accessing the valid area may cause page fault since FLPD 1115 * cache may still cache zero_l2_table for the valid area 1116 * instead of new L2 page table that has the mapping 1117 * information of the valid area. 
1118 * Thus any replacement of zero_l2_table with other valid L2 1119 * page table must involve FLPD cache invalidation for System 1120 * MMU v3.3. 1121 * FLPD cache invalidation is performed with TLB invalidation 1122 * by VPN without blocking. It is safe to invalidate TLB without 1123 * blocking because the target address of TLB invalidation is 1124 * not currently mapped. 1125 */ 1126 if (need_flush_flpd_cache) { 1127 struct sysmmu_drvdata *data; 1128 1129 spin_lock(&domain->lock); 1130 list_for_each_entry(data, &domain->clients, domain_node) 1131 sysmmu_tlb_invalidate_flpdcache(data, iova); 1132 spin_unlock(&domain->lock); 1133 } 1134 } 1135 1136 return page_entry(sent, iova); 1137 } 1138 1139 static int lv1set_section(struct exynos_iommu_domain *domain, 1140 sysmmu_pte_t *sent, sysmmu_iova_t iova, 1141 phys_addr_t paddr, int prot, short *pgcnt) 1142 { 1143 if (lv1ent_section(sent)) { 1144 WARN(1, "Trying mapping on 1MiB@%#08x that is mapped", 1145 iova); 1146 return -EADDRINUSE; 1147 } 1148 1149 if (lv1ent_page(sent)) { 1150 if (*pgcnt != NUM_LV2ENTRIES) { 1151 WARN(1, "Trying mapping on 1MiB@%#08x that is mapped", 1152 iova); 1153 return -EADDRINUSE; 1154 } 1155 1156 kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0)); 1157 *pgcnt = 0; 1158 } 1159 1160 exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot)); 1161 1162 spin_lock(&domain->lock); 1163 if (lv1ent_page_zero(sent)) { 1164 struct sysmmu_drvdata *data; 1165 /* 1166 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD 1167 * entry by speculative prefetch of SLPD which has no mapping. 
1168 */ 1169 list_for_each_entry(data, &domain->clients, domain_node) 1170 sysmmu_tlb_invalidate_flpdcache(data, iova); 1171 } 1172 spin_unlock(&domain->lock); 1173 1174 return 0; 1175 } 1176 1177 static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, 1178 int prot, short *pgcnt) 1179 { 1180 if (size == SPAGE_SIZE) { 1181 if (WARN_ON(!lv2ent_fault(pent))) 1182 return -EADDRINUSE; 1183 1184 exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot)); 1185 *pgcnt -= 1; 1186 } else { /* size == LPAGE_SIZE */ 1187 int i; 1188 dma_addr_t pent_base = virt_to_phys(pent); 1189 1190 dma_sync_single_for_cpu(dma_dev, pent_base, 1191 sizeof(*pent) * SPAGES_PER_LPAGE, 1192 DMA_TO_DEVICE); 1193 for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) { 1194 if (WARN_ON(!lv2ent_fault(pent))) { 1195 if (i > 0) 1196 memset(pent - i, 0, sizeof(*pent) * i); 1197 return -EADDRINUSE; 1198 } 1199 1200 *pent = mk_lv2ent_lpage(paddr, prot); 1201 } 1202 dma_sync_single_for_device(dma_dev, pent_base, 1203 sizeof(*pent) * SPAGES_PER_LPAGE, 1204 DMA_TO_DEVICE); 1205 *pgcnt -= SPAGES_PER_LPAGE; 1206 } 1207 1208 return 0; 1209 } 1210 1211 /* 1212 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu: 1213 * 1214 * System MMU v3.x has advanced logic to improve address translation 1215 * performance with caching more page table entries by a page table walk. 1216 * However, the logic has a bug that while caching faulty page table entries, 1217 * System MMU reports page fault if the cached fault entry is hit even though 1218 * the fault entry is updated to a valid entry after the entry is cached. 1219 * To prevent caching faulty page table entries which may be updated to valid 1220 * entries later, the virtual memory manager should care about the workaround 1221 * for the problem. The following describes the workaround. 
1222 * 1223 * Any two consecutive I/O virtual address regions must have a hole of 128KiB 1224 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug). 1225 * 1226 * Precisely, any start address of I/O virtual region must be aligned with 1227 * the following sizes for System MMU v3.1 and v3.2. 1228 * System MMU v3.1: 128KiB 1229 * System MMU v3.2: 256KiB 1230 * 1231 * Because System MMU v3.3 caches page table entries more aggressively, it needs 1232 * more workarounds. 1233 * - Any two consecutive I/O virtual regions must have a hole of size larger 1234 * than or equal to 128KiB. 1235 * - Start address of an I/O virtual region must be aligned by 128KiB. 1236 */ 1237 static int exynos_iommu_map(struct iommu_domain *iommu_domain, 1238 unsigned long l_iova, phys_addr_t paddr, size_t size, 1239 size_t count, int prot, gfp_t gfp, size_t *mapped) 1240 { 1241 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); 1242 sysmmu_pte_t *entry; 1243 sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; 1244 unsigned long flags; 1245 int ret = -ENOMEM; 1246 1247 BUG_ON(domain->pgtable == NULL); 1248 prot &= SYSMMU_SUPPORTED_PROT_BITS; 1249 1250 spin_lock_irqsave(&domain->pgtablelock, flags); 1251 1252 entry = section_entry(domain->pgtable, iova); 1253 1254 if (size == SECT_SIZE) { 1255 ret = lv1set_section(domain, entry, iova, paddr, prot, 1256 &domain->lv2entcnt[lv1ent_offset(iova)]); 1257 } else { 1258 sysmmu_pte_t *pent; 1259 1260 pent = alloc_lv2entry(domain, entry, iova, 1261 &domain->lv2entcnt[lv1ent_offset(iova)]); 1262 1263 if (IS_ERR(pent)) 1264 ret = PTR_ERR(pent); 1265 else 1266 ret = lv2set_page(pent, paddr, size, prot, 1267 &domain->lv2entcnt[lv1ent_offset(iova)]); 1268 } 1269 1270 if (ret) 1271 pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n", 1272 __func__, ret, size, iova); 1273 else 1274 *mapped = size; 1275 1276 spin_unlock_irqrestore(&domain->pgtablelock, flags); 1277 1278 return ret; 1279 } 1280 1281 static void 
exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, 1282 sysmmu_iova_t iova, size_t size) 1283 { 1284 struct sysmmu_drvdata *data; 1285 unsigned long flags; 1286 1287 spin_lock_irqsave(&domain->lock, flags); 1288 1289 list_for_each_entry(data, &domain->clients, domain_node) 1290 sysmmu_tlb_invalidate_entry(data, iova, size); 1291 1292 spin_unlock_irqrestore(&domain->lock, flags); 1293 } 1294 1295 static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain, 1296 unsigned long l_iova, size_t size, size_t count, 1297 struct iommu_iotlb_gather *gather) 1298 { 1299 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); 1300 sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; 1301 sysmmu_pte_t *ent; 1302 size_t err_pgsize; 1303 unsigned long flags; 1304 1305 BUG_ON(domain->pgtable == NULL); 1306 1307 spin_lock_irqsave(&domain->pgtablelock, flags); 1308 1309 ent = section_entry(domain->pgtable, iova); 1310 1311 if (lv1ent_section(ent)) { 1312 if (WARN_ON(size < SECT_SIZE)) { 1313 err_pgsize = SECT_SIZE; 1314 goto err; 1315 } 1316 1317 /* workaround for h/w bug in System MMU v3.3 */ 1318 exynos_iommu_set_pte(ent, ZERO_LV2LINK); 1319 size = SECT_SIZE; 1320 goto done; 1321 } 1322 1323 if (unlikely(lv1ent_fault(ent))) { 1324 if (size > SECT_SIZE) 1325 size = SECT_SIZE; 1326 goto done; 1327 } 1328 1329 /* lv1ent_page(sent) == true here */ 1330 1331 ent = page_entry(ent, iova); 1332 1333 if (unlikely(lv2ent_fault(ent))) { 1334 size = SPAGE_SIZE; 1335 goto done; 1336 } 1337 1338 if (lv2ent_small(ent)) { 1339 exynos_iommu_set_pte(ent, 0); 1340 size = SPAGE_SIZE; 1341 domain->lv2entcnt[lv1ent_offset(iova)] += 1; 1342 goto done; 1343 } 1344 1345 /* lv1ent_large(ent) == true here */ 1346 if (WARN_ON(size < LPAGE_SIZE)) { 1347 err_pgsize = LPAGE_SIZE; 1348 goto err; 1349 } 1350 1351 dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), 1352 sizeof(*ent) * SPAGES_PER_LPAGE, 1353 DMA_TO_DEVICE); 1354 memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE); 1355 
dma_sync_single_for_device(dma_dev, virt_to_phys(ent), 1356 sizeof(*ent) * SPAGES_PER_LPAGE, 1357 DMA_TO_DEVICE); 1358 size = LPAGE_SIZE; 1359 domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; 1360 done: 1361 spin_unlock_irqrestore(&domain->pgtablelock, flags); 1362 1363 exynos_iommu_tlb_invalidate_entry(domain, iova, size); 1364 1365 return size; 1366 err: 1367 spin_unlock_irqrestore(&domain->pgtablelock, flags); 1368 1369 pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n", 1370 __func__, size, iova, err_pgsize); 1371 1372 return 0; 1373 } 1374 1375 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain, 1376 dma_addr_t iova) 1377 { 1378 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); 1379 sysmmu_pte_t *entry; 1380 unsigned long flags; 1381 phys_addr_t phys = 0; 1382 1383 spin_lock_irqsave(&domain->pgtablelock, flags); 1384 1385 entry = section_entry(domain->pgtable, iova); 1386 1387 if (lv1ent_section(entry)) { 1388 phys = section_phys(entry) + section_offs(iova); 1389 } else if (lv1ent_page(entry)) { 1390 entry = page_entry(entry, iova); 1391 1392 if (lv2ent_large(entry)) 1393 phys = lpage_phys(entry) + lpage_offs(iova); 1394 else if (lv2ent_small(entry)) 1395 phys = spage_phys(entry) + spage_offs(iova); 1396 } 1397 1398 spin_unlock_irqrestore(&domain->pgtablelock, flags); 1399 1400 return phys; 1401 } 1402 1403 static struct iommu_device *exynos_iommu_probe_device(struct device *dev) 1404 { 1405 struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); 1406 struct sysmmu_drvdata *data; 1407 1408 if (!has_sysmmu(dev)) 1409 return ERR_PTR(-ENODEV); 1410 1411 list_for_each_entry(data, &owner->controllers, owner_node) { 1412 /* 1413 * SYSMMU will be runtime activated via device link 1414 * (dependency) to its master device, so there are no 1415 * direct calls to pm_runtime_get/put in this driver. 
1416 */ 1417 data->link = device_link_add(dev, data->sysmmu, 1418 DL_FLAG_STATELESS | 1419 DL_FLAG_PM_RUNTIME); 1420 } 1421 1422 /* There is always at least one entry, see exynos_iommu_of_xlate() */ 1423 data = list_first_entry(&owner->controllers, 1424 struct sysmmu_drvdata, owner_node); 1425 1426 return &data->iommu; 1427 } 1428 1429 static void exynos_iommu_release_device(struct device *dev) 1430 { 1431 struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); 1432 struct sysmmu_drvdata *data; 1433 1434 list_for_each_entry(data, &owner->controllers, owner_node) 1435 device_link_del(data->link); 1436 } 1437 1438 static int exynos_iommu_of_xlate(struct device *dev, 1439 const struct of_phandle_args *spec) 1440 { 1441 struct platform_device *sysmmu = of_find_device_by_node(spec->np); 1442 struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); 1443 struct sysmmu_drvdata *data, *entry; 1444 1445 if (!sysmmu) 1446 return -ENODEV; 1447 1448 data = platform_get_drvdata(sysmmu); 1449 put_device(&sysmmu->dev); 1450 if (!data) 1451 return -ENODEV; 1452 1453 if (!owner) { 1454 owner = kzalloc(sizeof(*owner), GFP_KERNEL); 1455 if (!owner) 1456 return -ENOMEM; 1457 1458 INIT_LIST_HEAD(&owner->controllers); 1459 mutex_init(&owner->rpm_lock); 1460 owner->domain = &exynos_identity_domain; 1461 dev_iommu_priv_set(dev, owner); 1462 } 1463 1464 list_for_each_entry(entry, &owner->controllers, owner_node) 1465 if (entry == data) 1466 return 0; 1467 1468 list_add_tail(&data->owner_node, &owner->controllers); 1469 data->master = dev; 1470 1471 return 0; 1472 } 1473 1474 static const struct iommu_ops exynos_iommu_ops = { 1475 .identity_domain = &exynos_identity_domain, 1476 .release_domain = &exynos_identity_domain, 1477 .domain_alloc_paging = exynos_iommu_domain_alloc_paging, 1478 .device_group = generic_device_group, 1479 .probe_device = exynos_iommu_probe_device, 1480 .release_device = exynos_iommu_release_device, 1481 .get_resv_regions = iommu_dma_get_resv_regions, 1482 
.of_xlate = exynos_iommu_of_xlate, 1483 .default_domain_ops = &(const struct iommu_domain_ops) { 1484 .attach_dev = exynos_iommu_attach_device, 1485 .map_pages = exynos_iommu_map, 1486 .unmap_pages = exynos_iommu_unmap, 1487 .iova_to_phys = exynos_iommu_iova_to_phys, 1488 .free = exynos_iommu_domain_free, 1489 } 1490 }; 1491 1492 static int __init exynos_iommu_init(void) 1493 { 1494 struct device_node *np; 1495 int ret; 1496 1497 np = of_find_matching_node(NULL, sysmmu_of_match); 1498 if (!np) 1499 return 0; 1500 1501 of_node_put(np); 1502 1503 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", 1504 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); 1505 if (!lv2table_kmem_cache) { 1506 pr_err("%s: Failed to create kmem cache\n", __func__); 1507 return -ENOMEM; 1508 } 1509 1510 zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL); 1511 if (zero_lv2_table == NULL) { 1512 pr_err("%s: Failed to allocate zero level2 page table\n", 1513 __func__); 1514 ret = -ENOMEM; 1515 goto err_zero_lv2; 1516 } 1517 1518 ret = platform_driver_register(&exynos_sysmmu_driver); 1519 if (ret) { 1520 pr_err("%s: Failed to register driver\n", __func__); 1521 goto err_reg_driver; 1522 } 1523 1524 return 0; 1525 err_reg_driver: 1526 kmem_cache_free(lv2table_kmem_cache, zero_lv2_table); 1527 err_zero_lv2: 1528 kmem_cache_destroy(lv2table_kmem_cache); 1529 return ret; 1530 } 1531 core_initcall(exynos_iommu_init); 1532