1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * IOMMU API for Renesas VMSA-compatible IPMMU 4 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 5 * 6 * Copyright (C) 2014-2020 Renesas Electronics Corporation 7 */ 8 9 #include <linux/bitmap.h> 10 #include <linux/delay.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/err.h> 13 #include <linux/export.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/io-pgtable.h> 18 #include <linux/iommu.h> 19 #include <linux/of.h> 20 #include <linux/of_device.h> 21 #include <linux/of_platform.h> 22 #include <linux/platform_device.h> 23 #include <linux/sizes.h> 24 #include <linux/slab.h> 25 #include <linux/sys_soc.h> 26 27 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) 28 #include <asm/dma-iommu.h> 29 #else 30 #define arm_iommu_create_mapping(...) NULL 31 #define arm_iommu_attach_device(...) -ENODEV 32 #define arm_iommu_release_mapping(...) do {} while (0) 33 #define arm_iommu_detach_device(...) do {} while (0) 34 #endif 35 36 #define IPMMU_CTX_MAX 16U 37 #define IPMMU_CTX_INVALID -1 38 39 #define IPMMU_UTLB_MAX 64U 40 41 struct ipmmu_features { 42 bool use_ns_alias_offset; 43 bool has_cache_leaf_nodes; 44 unsigned int number_of_contexts; 45 unsigned int num_utlbs; 46 bool setup_imbuscr; 47 bool twobit_imttbcr_sl0; 48 bool reserved_context; 49 bool cache_snoop; 50 unsigned int ctx_offset_base; 51 unsigned int ctx_offset_stride; 52 unsigned int utlb_offset_base; 53 }; 54 55 struct ipmmu_vmsa_device { 56 struct device *dev; 57 void __iomem *base; 58 struct iommu_device iommu; 59 struct ipmmu_vmsa_device *root; 60 const struct ipmmu_features *features; 61 unsigned int num_ctx; 62 spinlock_t lock; /* Protects ctx and domains[] */ 63 DECLARE_BITMAP(ctx, IPMMU_CTX_MAX); 64 struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX]; 65 s8 utlb_ctx[IPMMU_UTLB_MAX]; 66 67 struct iommu_group *group; 68 struct dma_iommu_mapping *mapping; 69 }; 70 71 struct ipmmu_vmsa_domain { 72 struct ipmmu_vmsa_device *mmu; 73 struct iommu_domain io_domain; 74 75 struct io_pgtable_cfg cfg; 76 struct io_pgtable_ops *iop; 77 78 unsigned int context_id; 79 struct mutex mutex; /* Protects mappings */ 80 }; 81 82 static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom) 83 { 84 return container_of(dom, struct ipmmu_vmsa_domain, io_domain); 85 } 86 87 static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) 88 { 89 return dev_iommu_priv_get(dev); 90 } 91 92 #define TLB_LOOP_TIMEOUT 100 /* 100us */ 93 94 /* ----------------------------------------------------------------------------- 95 * Registers Definition 96 */ 97 98 #define IM_NS_ALIAS_OFFSET 0x800 99 100 /* MMU "context" registers */ 101 #define IMCTR 0x0000 /* R-Car Gen2/3 */ 102 #define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */ 103 #define IMCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */ 104 #define IMCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */ 105 106 #define IMTTBCR 0x0008 /* R-Car Gen2/3 */ 107 #define IMTTBCR_EAE (1 << 31) /* R-Car Gen2/3 */ 108 #define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */ 109 #define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */ 110 #define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */ 111 #define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */ 112 #define IMTTBCR_SL0_LVL_1 (1 << 4) /* R-Car Gen2 only */ 113 114 #define IMBUSCR 0x000c /* R-Car Gen2 only */ 115 #define IMBUSCR_DVM (1 << 2) /* R-Car Gen2 only */ 116 #define IMBUSCR_BUSSEL_MASK (3 << 0) /* R-Car Gen2 only */ 117 118 #define IMTTLBR0 0x0010 /* R-Car Gen2/3 */ 119 #define IMTTUBR0 0x0014 /* R-Car Gen2/3 */ 120 121 #define IMSTR 0x0020 /* R-Car Gen2/3 */ 122 #define IMSTR_MHIT (1 << 4) /* R-Car Gen2/3 */ 123 #define IMSTR_ABORT (1 << 2) /* R-Car Gen2/3 */ 124 #define IMSTR_PF (1 << 1) /* R-Car Gen2/3 */ 125 #define IMSTR_TF (1 << 0) /* R-Car Gen2/3 */ 126 127 #define IMMAIR0 0x0028 /* R-Car Gen2/3 */ 128 129 #define IMELAR 0x0030 /* R-Car Gen2/3, IMEAR on R-Car Gen2 */ 130 #define IMEUAR 0x0034 /* R-Car Gen3 only */ 131 132 /* uTLB registers */ 133 #define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n)) 134 #define IMUCTR0(n) (0x0300 + ((n) * 16)) /* R-Car Gen2/3 */ 135 #define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) /* R-Car Gen3 only */ 136 #define IMUCTR_TTSEL_MMU(n) ((n) << 4) /* R-Car Gen2/3 */ 137 #define IMUCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */ 138 #define IMUCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */ 139 140 #define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n)) 141 #define IMUASID0(n) (0x0308 + ((n) * 16)) /* R-Car Gen2/3 */ 142 #define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) /* R-Car Gen3 only */ 143 144 /* ----------------------------------------------------------------------------- 145 * Root device handling 146 */ 147 148 static struct platform_driver ipmmu_driver; 149 150 static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu) 151 { 152 return mmu->root == mmu; 153 } 154 155 static int __ipmmu_check_device(struct device *dev, void *data) 156 { 157 struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); 158 struct ipmmu_vmsa_device **rootp = data; 159 160 if (ipmmu_is_root(mmu)) 161 *rootp = mmu; 162 163 return 0; 164 } 165 166 static struct ipmmu_vmsa_device *ipmmu_find_root(void) 167 { 168 struct ipmmu_vmsa_device *root = NULL; 169 170 return driver_for_each_device(&ipmmu_driver.driver, NULL, &root, 171 __ipmmu_check_device) == 0 ? root : NULL; 172 } 173 174 /* ----------------------------------------------------------------------------- 175 * Read/Write Access 176 */ 177 178 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) 179 { 180 return ioread32(mmu->base + offset); 181 } 182 183 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, 184 u32 data) 185 { 186 iowrite32(data, mmu->base + offset); 187 } 188 189 static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu, 190 unsigned int context_id, unsigned int reg) 191 { 192 unsigned int base = mmu->features->ctx_offset_base; 193 194 if (context_id > 7) 195 base += 0x800 - 8 * 0x40; 196 197 return base + context_id * mmu->features->ctx_offset_stride + reg; 198 } 199 200 static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu, 201 unsigned int context_id, unsigned int reg) 202 { 203 return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg)); 204 } 205 206 static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu, 207 unsigned int context_id, unsigned int reg, u32 data) 208 { 209 ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data); 210 } 211 212 static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain, 213 unsigned int reg) 214 { 215 return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg); 216 } 217 218 static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain, 219 unsigned int reg, u32 data) 220 { 221 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); 222 } 223 224 static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, 225 unsigned int reg, u32 data) 226 { 227 if (domain->mmu != domain->mmu->root) 228 ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data); 229 230 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); 231 } 232 233 static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg) 234 { 235 return mmu->features->utlb_offset_base + reg; 236 } 237 238 static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu, 239 unsigned int utlb, u32 data) 240 { 241 ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data); 242 } 243 244 static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu, 245 unsigned int utlb, u32 data) 246 { 247 ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data); 248 } 249 250 /* ----------------------------------------------------------------------------- 251 * TLB and microTLB Management 252 */ 253 254 /* Wait for any pending TLB invalidations to complete */ 255 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) 256 { 257 unsigned int count = 0; 258 259 while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) { 260 cpu_relax(); 261 if (++count == TLB_LOOP_TIMEOUT) { 262 dev_err_ratelimited(domain->mmu->dev, 263 "TLB sync timed out -- MMU may be deadlocked\n"); 264 return; 265 } 266 udelay(1); 267 } 268 } 269 270 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) 271 { 272 u32 reg; 273 274 reg = ipmmu_ctx_read_root(domain, IMCTR); 275 reg |= IMCTR_FLUSH; 276 ipmmu_ctx_write_all(domain, IMCTR, reg); 277 278 ipmmu_tlb_sync(domain); 279 } 280 281 /* 282 * Enable MMU translation for the microTLB. 283 */ 284 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, 285 unsigned int utlb) 286 { 287 struct ipmmu_vmsa_device *mmu = domain->mmu; 288 289 /* 290 * TODO: Reference-count the microTLB as several bus masters can be 291 * connected to the same microTLB. 292 */ 293 294 /* TODO: What should we set the ASID to ? */ 295 ipmmu_imuasid_write(mmu, utlb, 0); 296 /* TODO: Do we need to flush the microTLB ? */ 297 ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) | 298 IMUCTR_FLUSH | IMUCTR_MMUEN); 299 mmu->utlb_ctx[utlb] = domain->context_id; 300 } 301 302 /* 303 * Disable MMU translation for the microTLB. 304 */ 305 static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, 306 unsigned int utlb) 307 { 308 struct ipmmu_vmsa_device *mmu = domain->mmu; 309 310 ipmmu_imuctr_write(mmu, utlb, 0); 311 mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID; 312 } 313 314 static void ipmmu_tlb_flush_all(void *cookie) 315 { 316 struct ipmmu_vmsa_domain *domain = cookie; 317 318 ipmmu_tlb_invalidate(domain); 319 } 320 321 static void ipmmu_tlb_flush(unsigned long iova, size_t size, 322 size_t granule, void *cookie) 323 { 324 ipmmu_tlb_flush_all(cookie); 325 } 326 327 static const struct iommu_flush_ops ipmmu_flush_ops = { 328 .tlb_flush_all = ipmmu_tlb_flush_all, 329 .tlb_flush_walk = ipmmu_tlb_flush, 330 }; 331 332 /* ----------------------------------------------------------------------------- 333 * Domain/Context Management 334 */ 335 336 static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu, 337 struct ipmmu_vmsa_domain *domain) 338 { 339 unsigned long flags; 340 int ret; 341 342 spin_lock_irqsave(&mmu->lock, flags); 343 344 ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx); 345 if (ret != mmu->num_ctx) { 346 mmu->domains[ret] = domain; 347 set_bit(ret, mmu->ctx); 348 } else 349 ret = -EBUSY; 350 351 spin_unlock_irqrestore(&mmu->lock, flags); 352 353 return ret; 354 } 355 356 static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, 357 unsigned int context_id) 358 { 359 unsigned long flags; 360 361 spin_lock_irqsave(&mmu->lock, flags); 362 363 clear_bit(context_id, mmu->ctx); 364 mmu->domains[context_id] = NULL; 365 366 spin_unlock_irqrestore(&mmu->lock, flags); 367 } 368 369 static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain) 370 { 371 u64 ttbr; 372 u32 tmp; 373 374 /* TTBR0 */ 375 ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr; 376 ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr); 377 ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32); 378 379 /* 380 * TTBCR 381 * We use long descriptors and allocate the whole 32-bit VA space to 382 * TTBR0. 383 */ 384 if (domain->mmu->features->twobit_imttbcr_sl0) 385 tmp = IMTTBCR_SL0_TWOBIT_LVL_1; 386 else 387 tmp = IMTTBCR_SL0_LVL_1; 388 389 if (domain->mmu->features->cache_snoop) 390 tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | 391 IMTTBCR_IRGN0_WB_WA; 392 393 ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp); 394 395 /* MAIR0 */ 396 ipmmu_ctx_write_root(domain, IMMAIR0, 397 domain->cfg.arm_lpae_s1_cfg.mair); 398 399 /* IMBUSCR */ 400 if (domain->mmu->features->setup_imbuscr) 401 ipmmu_ctx_write_root(domain, IMBUSCR, 402 ipmmu_ctx_read_root(domain, IMBUSCR) & 403 ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); 404 405 /* 406 * IMSTR 407 * Clear all interrupt flags. 408 */ 409 ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR)); 410 411 /* 412 * IMCTR 413 * Enable the MMU and interrupt generation. The long-descriptor 414 * translation table format doesn't use TEX remapping. Don't enable AF 415 * software management as we have no use for it. Flush the TLB as 416 * required when modifying the context registers. 417 */ 418 ipmmu_ctx_write_all(domain, IMCTR, 419 IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); 420 } 421 422 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) 423 { 424 int ret; 425 426 /* 427 * Allocate the page table operations. 428 * 429 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory 430 * access, Long-descriptor format" that the NStable bit being set in a 431 * table descriptor will result in the NStable and NS bits of all child 432 * entries being ignored and considered as being set. The IPMMU seems 433 * not to comply with this, as it generates a secure access page fault 434 * if any of the NStable and NS bits isn't set when running in 435 * non-secure mode. 436 */ 437 domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; 438 domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; 439 domain->cfg.ias = 32; 440 domain->cfg.oas = 40; 441 domain->cfg.tlb = &ipmmu_flush_ops; 442 domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); 443 domain->io_domain.geometry.force_aperture = true; 444 /* 445 * TODO: Add support for coherent walk through CCI with DVM and remove 446 * cache handling. For now, delegate it to the io-pgtable code. 447 */ 448 domain->cfg.coherent_walk = false; 449 domain->cfg.iommu_dev = domain->mmu->root->dev; 450 451 /* 452 * Find an unused context. 453 */ 454 ret = ipmmu_domain_allocate_context(domain->mmu->root, domain); 455 if (ret < 0) 456 return ret; 457 458 domain->context_id = ret; 459 460 domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, 461 domain); 462 if (!domain->iop) { 463 ipmmu_domain_free_context(domain->mmu->root, 464 domain->context_id); 465 return -EINVAL; 466 } 467 468 ipmmu_domain_setup_context(domain); 469 return 0; 470 } 471 472 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) 473 { 474 if (!domain->mmu) 475 return; 476 477 /* 478 * Disable the context. Flush the TLB as required when modifying the 479 * context registers. 480 * 481 * TODO: Is TLB flush really needed ? 482 */ 483 ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH); 484 ipmmu_tlb_sync(domain); 485 ipmmu_domain_free_context(domain->mmu->root, domain->context_id); 486 } 487 488 /* ----------------------------------------------------------------------------- 489 * Fault Handling 490 */ 491 492 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) 493 { 494 const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF; 495 struct ipmmu_vmsa_device *mmu = domain->mmu; 496 unsigned long iova; 497 u32 status; 498 499 status = ipmmu_ctx_read_root(domain, IMSTR); 500 if (!(status & err_mask)) 501 return IRQ_NONE; 502 503 iova = ipmmu_ctx_read_root(domain, IMELAR); 504 if (IS_ENABLED(CONFIG_64BIT)) 505 iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32; 506 507 /* 508 * Clear the error status flags. Unlike traditional interrupt flag 509 * registers that must be cleared by writing 1, this status register 510 * seems to require 0. The error address register must be read before, 511 * otherwise its value will be 0. 512 */ 513 ipmmu_ctx_write_root(domain, IMSTR, 0); 514 515 /* Log fatal errors. */ 516 if (status & IMSTR_MHIT) 517 dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n", 518 iova); 519 if (status & IMSTR_ABORT) 520 dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n", 521 iova); 522 523 if (!(status & (IMSTR_PF | IMSTR_TF))) 524 return IRQ_NONE; 525 526 /* 527 * Try to handle page faults and translation faults. 528 * 529 * TODO: We need to look up the faulty device based on the I/O VA. Use 530 * the IOMMU device for now. 531 */ 532 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) 533 return IRQ_HANDLED; 534 535 dev_err_ratelimited(mmu->dev, 536 "Unhandled fault: status 0x%08x iova 0x%lx\n", 537 status, iova); 538 539 return IRQ_HANDLED; 540 } 541 542 static irqreturn_t ipmmu_irq(int irq, void *dev) 543 { 544 struct ipmmu_vmsa_device *mmu = dev; 545 irqreturn_t status = IRQ_NONE; 546 unsigned int i; 547 unsigned long flags; 548 549 spin_lock_irqsave(&mmu->lock, flags); 550 551 /* 552 * Check interrupts for all active contexts. 553 */ 554 for (i = 0; i < mmu->num_ctx; i++) { 555 if (!mmu->domains[i]) 556 continue; 557 if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED) 558 status = IRQ_HANDLED; 559 } 560 561 spin_unlock_irqrestore(&mmu->lock, flags); 562 563 return status; 564 } 565 566 /* ----------------------------------------------------------------------------- 567 * IOMMU Operations 568 */ 569 570 static struct iommu_domain *ipmmu_domain_alloc(unsigned type) 571 { 572 struct ipmmu_vmsa_domain *domain; 573 574 if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) 575 return NULL; 576 577 domain = kzalloc(sizeof(*domain), GFP_KERNEL); 578 if (!domain) 579 return NULL; 580 581 mutex_init(&domain->mutex); 582 583 return &domain->io_domain; 584 } 585 586 static void ipmmu_domain_free(struct iommu_domain *io_domain) 587 { 588 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); 589 590 /* 591 * Free the domain resources. We assume that all devices have already 592 * been detached. 593 */ 594 ipmmu_domain_destroy_context(domain); 595 free_io_pgtable_ops(domain->iop); 596 kfree(domain); 597 } 598 599 static int ipmmu_attach_device(struct iommu_domain *io_domain, 600 struct device *dev) 601 { 602 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 603 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); 604 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); 605 unsigned int i; 606 int ret = 0; 607 608 if (!mmu) { 609 dev_err(dev, "Cannot attach to IPMMU\n"); 610 return -ENXIO; 611 } 612 613 mutex_lock(&domain->mutex); 614 615 if (!domain->mmu) { 616 /* The domain hasn't been used yet, initialize it. */ 617 domain->mmu = mmu; 618 ret = ipmmu_domain_init_context(domain); 619 if (ret < 0) { 620 dev_err(dev, "Unable to initialize IPMMU context\n"); 621 domain->mmu = NULL; 622 } else { 623 dev_info(dev, "Using IPMMU context %u\n", 624 domain->context_id); 625 } 626 } else if (domain->mmu != mmu) { 627 /* 628 * Something is wrong, we can't attach two devices using 629 * different IOMMUs to the same domain. 630 */ 631 ret = -EINVAL; 632 } else 633 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); 634 635 mutex_unlock(&domain->mutex); 636 637 if (ret < 0) 638 return ret; 639 640 for (i = 0; i < fwspec->num_ids; ++i) 641 ipmmu_utlb_enable(domain, fwspec->ids[i]); 642 643 return 0; 644 } 645 646 static void ipmmu_detach_device(struct iommu_domain *io_domain, 647 struct device *dev) 648 { 649 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 650 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); 651 unsigned int i; 652 653 for (i = 0; i < fwspec->num_ids; ++i) 654 ipmmu_utlb_disable(domain, fwspec->ids[i]); 655 656 /* 657 * TODO: Optimize by disabling the context when no device is attached. 658 */ 659 } 660 661 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, 662 phys_addr_t paddr, size_t pgsize, size_t pgcount, 663 int prot, gfp_t gfp, size_t *mapped) 664 { 665 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); 666 667 return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount, 668 prot, gfp, mapped); 669 } 670 671 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, 672 size_t pgsize, size_t pgcount, 673 struct iommu_iotlb_gather *gather) 674 { 675 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); 676 677 return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather); 678 } 679 680 static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain) 681 { 682 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); 683 684 if (domain->mmu) 685 ipmmu_tlb_flush_all(domain); 686 } 687 688 static void ipmmu_iotlb_sync(struct iommu_domain *io_domain, 689 struct iommu_iotlb_gather *gather) 690 { 691 ipmmu_flush_iotlb_all(io_domain); 692 } 693 694 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, 695 dma_addr_t iova) 696 { 697 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); 698 699 /* TODO: Is locking needed ? */ 700 701 return domain->iop->iova_to_phys(domain->iop, iova); 702 } 703 704 static int ipmmu_init_platform_device(struct device *dev, 705 struct of_phandle_args *args) 706 { 707 struct platform_device *ipmmu_pdev; 708 709 ipmmu_pdev = of_find_device_by_node(args->np); 710 if (!ipmmu_pdev) 711 return -ENODEV; 712 713 dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev)); 714 715 return 0; 716 } 717 718 static const struct soc_device_attribute soc_needs_opt_in[] = { 719 { .family = "R-Car Gen3", }, 720 { .family = "R-Car Gen4", }, 721 { .family = "RZ/G2", }, 722 { /* sentinel */ } 723 }; 724 725 static const struct soc_device_attribute soc_denylist[] = { 726 { .soc_id = "r8a774a1", }, 727 { .soc_id = "r8a7795", .revision = "ES1.*" }, 728 { .soc_id = "r8a7795", .revision = "ES2.*" }, 729 { .soc_id = "r8a7796", }, 730 { /* sentinel */ } 731 }; 732 733 static const char * const devices_allowlist[] = { 734 "ee100000.mmc", 735 "ee120000.mmc", 736 "ee140000.mmc", 737 "ee160000.mmc" 738 }; 739 740 static bool ipmmu_device_is_allowed(struct device *dev) 741 { 742 unsigned int i; 743 744 /* 745 * R-Car Gen3/4 and RZ/G2 use the allow list to opt-in devices. 746 * For Other SoCs, this returns true anyway. 747 */ 748 if (!soc_device_match(soc_needs_opt_in)) 749 return true; 750 751 /* Check whether this SoC can use the IPMMU correctly or not */ 752 if (soc_device_match(soc_denylist)) 753 return false; 754 755 /* Check whether this device can work with the IPMMU */ 756 for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) { 757 if (!strcmp(dev_name(dev), devices_allowlist[i])) 758 return true; 759 } 760 761 /* Otherwise, do not allow use of IPMMU */ 762 return false; 763 } 764 765 static int ipmmu_of_xlate(struct device *dev, 766 struct of_phandle_args *spec) 767 { 768 if (!ipmmu_device_is_allowed(dev)) 769 return -ENODEV; 770 771 iommu_fwspec_add_ids(dev, spec->args, 1); 772 773 /* Initialize once - xlate() will call multiple times */ 774 if (to_ipmmu(dev)) 775 return 0; 776 777 return ipmmu_init_platform_device(dev, spec); 778 } 779 780 static int ipmmu_init_arm_mapping(struct device *dev) 781 { 782 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); 783 int ret; 784 785 /* 786 * Create the ARM mapping, used by the ARM DMA mapping core to allocate 787 * VAs. This will allocate a corresponding IOMMU domain. 788 * 789 * TODO: 790 * - Create one mapping per context (TLB). 791 * - Make the mapping size configurable ? We currently use a 2GB mapping 792 * at a 1GB offset to ensure that NULL VAs will fault. 793 */ 794 if (!mmu->mapping) { 795 struct dma_iommu_mapping *mapping; 796 797 mapping = arm_iommu_create_mapping(&platform_bus_type, 798 SZ_1G, SZ_2G); 799 if (IS_ERR(mapping)) { 800 dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); 801 ret = PTR_ERR(mapping); 802 goto error; 803 } 804 805 mmu->mapping = mapping; 806 } 807 808 /* Attach the ARM VA mapping to the device. */ 809 ret = arm_iommu_attach_device(dev, mmu->mapping); 810 if (ret < 0) { 811 dev_err(dev, "Failed to attach device to VA mapping\n"); 812 goto error; 813 } 814 815 return 0; 816 817 error: 818 if (mmu->mapping) 819 arm_iommu_release_mapping(mmu->mapping); 820 821 return ret; 822 } 823 824 static struct iommu_device *ipmmu_probe_device(struct device *dev) 825 { 826 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); 827 828 /* 829 * Only let through devices that have been verified in xlate() 830 */ 831 if (!mmu) 832 return ERR_PTR(-ENODEV); 833 834 return &mmu->iommu; 835 } 836 837 static void ipmmu_probe_finalize(struct device *dev) 838 { 839 int ret = 0; 840 841 if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) 842 ret = ipmmu_init_arm_mapping(dev); 843 844 if (ret) 845 dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n"); 846 } 847 848 static void ipmmu_release_device(struct device *dev) 849 { 850 arm_iommu_detach_device(dev); 851 } 852 853 static struct iommu_group *ipmmu_find_group(struct device *dev) 854 { 855 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); 856 struct iommu_group *group; 857 858 if (mmu->group) 859 return iommu_group_ref_get(mmu->group); 860 861 group = iommu_group_alloc(); 862 if (!IS_ERR(group)) 863 mmu->group = group; 864 865 return group; 866 } 867 868 static const struct iommu_ops ipmmu_ops = { 869 .domain_alloc = ipmmu_domain_alloc, 870 .probe_device = ipmmu_probe_device, 871 .release_device = ipmmu_release_device, 872 .probe_finalize = ipmmu_probe_finalize, 873 .device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA) 874 ? generic_device_group : ipmmu_find_group, 875 .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, 876 .of_xlate = ipmmu_of_xlate, 877 .default_domain_ops = &(const struct iommu_domain_ops) { 878 .attach_dev = ipmmu_attach_device, 879 .detach_dev = ipmmu_detach_device, 880 .map_pages = ipmmu_map, 881 .unmap_pages = ipmmu_unmap, 882 .flush_iotlb_all = ipmmu_flush_iotlb_all, 883 .iotlb_sync = ipmmu_iotlb_sync, 884 .iova_to_phys = ipmmu_iova_to_phys, 885 .free = ipmmu_domain_free, 886 } 887 }; 888 889 /* ----------------------------------------------------------------------------- 890 * Probe/remove and init 891 */ 892 893 static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) 894 { 895 unsigned int i; 896 897 /* Disable all contexts. */ 898 for (i = 0; i < mmu->num_ctx; ++i) 899 ipmmu_ctx_write(mmu, i, IMCTR, 0); 900 } 901 902 static const struct ipmmu_features ipmmu_features_default = { 903 .use_ns_alias_offset = true, 904 .has_cache_leaf_nodes = false, 905 .number_of_contexts = 1, /* software only tested with one context */ 906 .num_utlbs = 32, 907 .setup_imbuscr = true, 908 .twobit_imttbcr_sl0 = false, 909 .reserved_context = false, 910 .cache_snoop = true, 911 .ctx_offset_base = 0, 912 .ctx_offset_stride = 0x40, 913 .utlb_offset_base = 0, 914 }; 915 916 static const struct ipmmu_features ipmmu_features_rcar_gen3 = { 917 .use_ns_alias_offset = false, 918 .has_cache_leaf_nodes = true, 919 .number_of_contexts = 8, 920 .num_utlbs = 48, 921 .setup_imbuscr = false, 922 .twobit_imttbcr_sl0 = true, 923 .reserved_context = true, 924 .cache_snoop = false, 925 .ctx_offset_base = 0, 926 .ctx_offset_stride = 0x40, 927 .utlb_offset_base = 0, 928 }; 929 930 static const struct ipmmu_features ipmmu_features_rcar_gen4 = { 931 .use_ns_alias_offset = false, 932 .has_cache_leaf_nodes = true, 933 .number_of_contexts = 16, 934 .num_utlbs = 64, 935 .setup_imbuscr = false, 936 .twobit_imttbcr_sl0 = true, 937 .reserved_context = true, 938 .cache_snoop = false, 939 .ctx_offset_base = 0x10000, 940 .ctx_offset_stride = 0x1040, 941 .utlb_offset_base = 0x3000, 942 }; 943 944 static const struct of_device_id ipmmu_of_ids[] = { 945 { 946 .compatible = "renesas,ipmmu-vmsa", 947 .data = &ipmmu_features_default, 948 }, { 949 .compatible = "renesas,ipmmu-r8a774a1", 950 .data = &ipmmu_features_rcar_gen3, 951 }, { 952 .compatible = "renesas,ipmmu-r8a774b1", 953 .data = &ipmmu_features_rcar_gen3, 954 }, { 955 .compatible = "renesas,ipmmu-r8a774c0", 956 .data = &ipmmu_features_rcar_gen3, 957 }, { 958 .compatible = "renesas,ipmmu-r8a774e1", 959 .data = &ipmmu_features_rcar_gen3, 960 }, { 961 .compatible = "renesas,ipmmu-r8a7795", 962 .data = &ipmmu_features_rcar_gen3, 963 }, { 964 .compatible = "renesas,ipmmu-r8a7796", 965 .data = &ipmmu_features_rcar_gen3, 966 }, { 967 .compatible = "renesas,ipmmu-r8a77961", 968 .data = &ipmmu_features_rcar_gen3, 969 }, { 970 .compatible = "renesas,ipmmu-r8a77965", 971 .data = &ipmmu_features_rcar_gen3, 972 }, { 973 .compatible = "renesas,ipmmu-r8a77970", 974 .data = &ipmmu_features_rcar_gen3, 975 }, { 976 .compatible = "renesas,ipmmu-r8a77980", 977 .data = &ipmmu_features_rcar_gen3, 978 }, { 979 .compatible = "renesas,ipmmu-r8a77990", 980 .data = &ipmmu_features_rcar_gen3, 981 }, { 982 .compatible = "renesas,ipmmu-r8a77995", 983 .data = &ipmmu_features_rcar_gen3, 984 }, { 985 .compatible = "renesas,ipmmu-r8a779a0", 986 .data = &ipmmu_features_rcar_gen4, 987 }, { 988 .compatible = "renesas,rcar-gen4-ipmmu-vmsa", 989 .data = &ipmmu_features_rcar_gen4, 990 }, { 991 /* Terminator */ 992 }, 993 }; 994 995 static int ipmmu_probe(struct platform_device *pdev) 996 { 997 struct ipmmu_vmsa_device *mmu; 998 struct resource *res; 999 int irq; 1000 int ret; 1001 1002 mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); 1003 if (!mmu) { 1004 dev_err(&pdev->dev, "cannot allocate device data\n"); 1005 return -ENOMEM; 1006 } 1007 1008 mmu->dev = &pdev->dev; 1009 spin_lock_init(&mmu->lock); 1010 bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); 1011 mmu->features = of_device_get_match_data(&pdev->dev); 1012 memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs); 1013 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); 1014 if (ret) 1015 return ret; 1016 1017 /* Map I/O memory and request IRQ. */ 1018 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1019 mmu->base = devm_ioremap_resource(&pdev->dev, res); 1020 if (IS_ERR(mmu->base)) 1021 return PTR_ERR(mmu->base); 1022 1023 /* 1024 * The IPMMU has two register banks, for secure and non-secure modes. 1025 * The bank mapped at the beginning of the IPMMU address space 1026 * corresponds to the running mode of the CPU. When running in secure 1027 * mode the non-secure register bank is also available at an offset. 1028 * 1029 * Secure mode operation isn't clearly documented and is thus currently 1030 * not implemented in the driver. Furthermore, preliminary tests of 1031 * non-secure operation with the main register bank were not successful. 1032 * Offset the registers base unconditionally to point to the non-secure 1033 * alias space for now. 1034 */ 1035 if (mmu->features->use_ns_alias_offset) 1036 mmu->base += IM_NS_ALIAS_OFFSET; 1037 1038 mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts); 1039 1040 /* 1041 * Determine if this IPMMU instance is a root device by checking for 1042 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property. 1043 */ 1044 if (!mmu->features->has_cache_leaf_nodes || 1045 !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL)) 1046 mmu->root = mmu; 1047 else 1048 mmu->root = ipmmu_find_root(); 1049 1050 /* 1051 * Wait until the root device has been registered for sure. 1052 */ 1053 if (!mmu->root) 1054 return -EPROBE_DEFER; 1055 1056 /* Root devices have mandatory IRQs */ 1057 if (ipmmu_is_root(mmu)) { 1058 irq = platform_get_irq(pdev, 0); 1059 if (irq < 0) 1060 return irq; 1061 1062 ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, 1063 dev_name(&pdev->dev), mmu); 1064 if (ret < 0) { 1065 dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); 1066 return ret; 1067 } 1068 1069 ipmmu_device_reset(mmu); 1070 1071 if (mmu->features->reserved_context) { 1072 dev_info(&pdev->dev, "IPMMU context 0 is reserved\n"); 1073 set_bit(0, mmu->ctx); 1074 } 1075 } 1076 1077 /* 1078 * Register the IPMMU to the IOMMU subsystem in the following cases: 1079 * - R-Car Gen2 IPMMU (all devices registered) 1080 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device) 1081 */ 1082 if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) { 1083 ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, 1084 dev_name(&pdev->dev)); 1085 if (ret) 1086 return ret; 1087 1088 ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev); 1089 if (ret) 1090 return ret; 1091 } 1092 1093 /* 1094 * We can't create the ARM mapping here as it requires the bus to have 1095 * an IOMMU, which only happens when bus_set_iommu() is called in 1096 * ipmmu_init() after the probe function returns. 1097 */ 1098 1099 platform_set_drvdata(pdev, mmu); 1100 1101 return 0; 1102 } 1103 1104 static int ipmmu_remove(struct platform_device *pdev) 1105 { 1106 struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); 1107 1108 iommu_device_sysfs_remove(&mmu->iommu); 1109 iommu_device_unregister(&mmu->iommu); 1110 1111 arm_iommu_release_mapping(mmu->mapping); 1112 1113 ipmmu_device_reset(mmu); 1114 1115 return 0; 1116 } 1117 1118 #ifdef CONFIG_PM_SLEEP 1119 static int ipmmu_resume_noirq(struct device *dev) 1120 { 1121 struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); 1122 unsigned int i; 1123 1124 /* Reset root MMU and restore contexts */ 1125 if (ipmmu_is_root(mmu)) { 1126 ipmmu_device_reset(mmu); 1127 1128 for (i = 0; i < mmu->num_ctx; i++) { 1129 if (!mmu->domains[i]) 1130 continue; 1131 1132 ipmmu_domain_setup_context(mmu->domains[i]); 1133 } 1134 } 1135 1136 /* Re-enable active micro-TLBs */ 1137 for (i = 0; i < mmu->features->num_utlbs; i++) { 1138 if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID) 1139 continue; 1140 1141 ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i); 1142 } 1143 1144 return 0; 1145 } 1146 1147 static const struct dev_pm_ops ipmmu_pm = { 1148 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq) 1149 }; 1150 #define DEV_PM_OPS &ipmmu_pm 1151 #else 1152 #define DEV_PM_OPS NULL 1153 #endif /* CONFIG_PM_SLEEP */ 1154 1155 static struct platform_driver ipmmu_driver = { 1156 .driver = { 1157 .name = "ipmmu-vmsa", 1158 .of_match_table = of_match_ptr(ipmmu_of_ids), 1159 .pm = DEV_PM_OPS, 1160 }, 1161 .probe = ipmmu_probe, 1162 .remove = ipmmu_remove, 1163 }; 1164 builtin_platform_driver(ipmmu_driver); 1165