// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014-2020 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

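/*
 * When the legacy ARM DMA/IOMMU mapping API isn't available (!CONFIG_ARM, or
 * CONFIG_IOMMU_DMA selected instead), stub the arm_iommu_*() helpers out so
 * that the probe and release paths below can call them unconditionally.
 */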
#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Register Definitions
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */

/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

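/*
 * Find the root IPMMU instance, if one has been probed. Cache (leaf) IPMMUs
 * defer their own probe until this succeeds (see ipmmu_probe()).
 */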
static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	return mmu->features->ctx_offset_base +
	       context_id * mmu->features->ctx_offset_stride + reg;
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

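/*
 * Write a context register both in the cache (leaf) IPMMU backing the domain,
 * when it differs from the root, and in the root IPMMU itself.
 */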
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
				      IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

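/*
 * Range-based invalidation isn't implemented; the io-pgtable flush callbacks
 * fall back to a full TLB invalidate for the context.
 */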
static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

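/*
 * Program the context registers: translation table base, translation control,
 * memory attributes and the enable bits. Also called from ipmmu_resume_noirq()
 * to restore live contexts after system suspend.
 */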
static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

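/*
 * Handle a fault on a single context: latch and clear the status, log fatal
 * errors and let report_iommu_fault() try to handle page and translation
 * faults.
 */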
static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before
	 * the status is cleared, otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

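/*
 * Only the root IPMMU requests the IRQ; the status registers are per-context,
 * so check every active context.
 */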
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			kfree(io_domain);
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size, struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

	return 0;
}

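/*
 * On R-Car Gen3 and RZ/G2 the IPMMU is opt-in: it is only used for devices on
 * the allow list below, and only on SoC revisions not covered by the deny
 * list. On other SoCs all devices are allowed.
 */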
static const struct soc_device_attribute soc_needs_opt_in[] = {
	{ .family = "R-Car Gen3", },
	{ .family = "RZ/G2", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_denylist[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a7795", .revision = "ES1.*" },
	{ .soc_id = "r8a7795", .revision = "ES2.*" },
	{ .soc_id = "r8a7796", },
	{ /* sentinel */ }
};

static const char * const devices_allowlist[] = {
	"ee100000.mmc",
	"ee120000.mmc",
	"ee140000.mmc",
	"ee160000.mmc"
};

static bool ipmmu_device_is_allowed(struct device *dev)
{
	unsigned int i;

	/*
	 * R-Car Gen3 and RZ/G2 use the allow list to opt in devices.
	 * For other SoCs, all devices are allowed.
	 */
	if (!soc_device_match(soc_needs_opt_in))
		return true;

	/* Check whether this SoC can use the IPMMU correctly or not */
	if (soc_device_match(soc_denylist))
		return false;

	/* Check whether this device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
		if (!strcmp(dev_name(dev), devices_allowlist[i]))
			return true;
	}

	/* Otherwise, do not allow use of IPMMU */
	return false;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_device_is_allowed(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() may be called multiple times. */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	/*
	 * Only let through devices that have been verified in xlate().
	 */
	if (!mmu)
		return ERR_PTR(-ENODEV);

	return &mmu->iommu;
}

static void ipmmu_probe_finalize(struct device *dev)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		ret = ipmmu_init_arm_mapping(dev);

	if (ret)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void ipmmu_release_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
			? generic_device_group : ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774e1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77961",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of the has_cache_leaf_nodes flag or of the
	 * renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

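/*
 * System PM: the IPMMU may lose its register state across suspend (for
 * instance when its power domain is powered off), so on resume reset the root
 * instance, reprogram every allocated context and re-enable the uTLBs that
 * were in use.
 */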
#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);