// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
// Copyright (C) 2019-2020, Cerno

#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define IOMMU_RESET_REG			0x010
#define IOMMU_RESET_RELEASE_ALL			0xffffffff
#define IOMMU_ENABLE_REG		0x020
#define IOMMU_ENABLE_ENABLE			BIT(0)

#define IOMMU_BYPASS_REG		0x030
#define IOMMU_AUTO_GATING_REG		0x040
#define IOMMU_AUTO_GATING_ENABLE		BIT(0)

#define IOMMU_WBUF_CTRL_REG		0x044
#define IOMMU_OOO_CTRL_REG		0x048
#define IOMMU_4KB_BDY_PRT_CTRL_REG	0x04c
#define IOMMU_TTB_REG			0x050
#define IOMMU_TLB_ENABLE_REG		0x060
#define IOMMU_TLB_PREFETCH_REG		0x070
#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m)	BIT(m)

#define IOMMU_TLB_FLUSH_REG		0x080
#define IOMMU_TLB_FLUSH_PTW_CACHE		BIT(17)
#define IOMMU_TLB_FLUSH_MACRO_TLB		BIT(16)
#define IOMMU_TLB_FLUSH_MICRO_TLB(i)		(BIT(i) & GENMASK(5, 0))

#define IOMMU_TLB_IVLD_ADDR_REG		0x090
#define IOMMU_TLB_IVLD_ADDR_MASK_REG	0x094
#define IOMMU_TLB_IVLD_ENABLE_REG	0x098
#define IOMMU_TLB_IVLD_ENABLE_ENABLE		BIT(0)

#define IOMMU_PC_IVLD_ADDR_REG		0x0a0
#define IOMMU_PC_IVLD_ENABLE_REG	0x0a8
#define IOMMU_PC_IVLD_ENABLE_ENABLE		BIT(0)

#define IOMMU_DM_AUT_CTRL_REG(d)	(0x0b0 + ((d) / 2) * 4)
#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m)	(1 << (((d & 1) * 16) + ((m) * 2)))
#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m)	(1 << (((d & 1) * 16) + ((m) * 2) + 1))
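
/*
 * A worked example of the IOMMU_DM_AUT_CTRL layout, derived purely
 * from the macros above: each register packs two domains, with one
 * read-unavailable and one write-unavailable bit per master. For
 * domain 3 and master 2, IOMMU_DM_AUT_CTRL_REG(3) is
 * 0x0b0 + (3 / 2) * 4 = 0x0b4, and since domain 3 is odd it lands in
 * the upper half-word: IOMMU_DM_AUT_CTRL_RD_UNAVAIL(3, 2) is
 * BIT(16 + 2 * 2) = BIT(20), and IOMMU_DM_AUT_CTRL_WR_UNAVAIL(3, 2)
 * is BIT(21).
 */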

#define IOMMU_DM_AUT_OVWT_REG		0x0d0
#define IOMMU_INT_ENABLE_REG		0x100
#define IOMMU_INT_CLR_REG		0x104
#define IOMMU_INT_STA_REG		0x108
#define IOMMU_INT_ERR_ADDR_REG(i)	(0x110 + (i) * 4)
#define IOMMU_INT_ERR_ADDR_L1_REG	0x130
#define IOMMU_INT_ERR_ADDR_L2_REG	0x134
#define IOMMU_INT_ERR_DATA_REG(i)	(0x150 + (i) * 4)
#define IOMMU_L1PG_INT_REG		0x0180
#define IOMMU_L2PG_INT_REG		0x0184

#define IOMMU_INT_INVALID_L2PG			BIT(17)
#define IOMMU_INT_INVALID_L1PG			BIT(16)
#define IOMMU_INT_MASTER_PERMISSION(m)		BIT(m)
#define IOMMU_INT_MASTER_MASK			(IOMMU_INT_MASTER_PERMISSION(0) | \
						 IOMMU_INT_MASTER_PERMISSION(1) | \
						 IOMMU_INT_MASTER_PERMISSION(2) | \
						 IOMMU_INT_MASTER_PERMISSION(3) | \
						 IOMMU_INT_MASTER_PERMISSION(4) | \
						 IOMMU_INT_MASTER_PERMISSION(5))
#define IOMMU_INT_MASK				(IOMMU_INT_INVALID_L1PG | \
						 IOMMU_INT_INVALID_L2PG | \
						 IOMMU_INT_MASTER_MASK)

#define PT_ENTRY_SIZE			sizeof(u32)

#define NUM_DT_ENTRIES			4096
#define DT_SIZE				(NUM_DT_ENTRIES * PT_ENTRY_SIZE)

#define NUM_PT_ENTRIES			256
#define PT_SIZE				(NUM_PT_ENTRIES * PT_ENTRY_SIZE)

#define SPAGE_SIZE			4096

struct sun50i_iommu {
	struct iommu_device iommu;

	/* Lock to modify the IOMMU registers */
	spinlock_t iommu_lock;

	struct device *dev;
	void __iomem *base;
	struct reset_control *reset;
	struct clk *clk;

	struct iommu_domain *domain;
	struct kmem_cache *pt_pool;
};

struct sun50i_iommu_domain {
	struct iommu_domain domain;

	/* Number of devices attached to the domain */
	refcount_t refcnt;

	/* L1 Page Table */
	u32 *dt;
	dma_addr_t dt_dma;

	struct sun50i_iommu *iommu;
};

static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct sun50i_iommu_domain, domain);
}

static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
	return readl(iommu->base + offset);
}

static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
	writel(value, iommu->base + offset);
}

/*
 * The Allwinner H6 IOMMU uses a 2-level page table.
 *
 * The first level is the usual Directory Table (DT), that consists of
 * 4096 4-byte Directory Table Entries (DTE), each pointing to a Page
 * Table (PT).
 *
 * Each PT consists of 256 4-byte Page Table Entries (PTE), each
 * pointing to a 4kB page of physical memory.
 *
 * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
 * register that contains its physical address.
 */

#define SUN50I_IOVA_DTE_MASK	GENMASK(31, 20)
#define SUN50I_IOVA_PTE_MASK	GENMASK(19, 12)
#define SUN50I_IOVA_PAGE_MASK	GENMASK(11, 0)

static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
}

static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
}

static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
}
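
/*
 * Worked example of the IOVA split above (pure mask arithmetic): for
 * iova = 0x12345678, sun50i_iova_get_dte_index() returns bits 31:20,
 * i.e. 0x123, sun50i_iova_get_pte_index() returns bits 19:12, i.e.
 * 0x45, and sun50i_iova_get_page_offset() returns bits 11:0, i.e.
 * 0x678.
 */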

/*
 * Each Directory Table Entry has a Page Table address and a valid
 * bit:
 *
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:10 - Page Table address
 *   9:2  - Reserved
 *   1:0  - 1 if the entry is valid
 */

#define SUN50I_DTE_PT_ADDRESS_MASK	GENMASK(31, 10)
#define SUN50I_DTE_PT_ATTRS		GENMASK(1, 0)
#define SUN50I_DTE_PT_VALID		1

static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
{
	return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
}

static bool sun50i_dte_is_pt_valid(u32 dte)
{
	return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
}

static u32 sun50i_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, an authority index and a valid bit:
 *
 * +----------------+-----+-----+-----+---+-----+
 * | Page address   | Rsv | ACI | Rsv | V | Rsv |
 * +----------------+-----+-----+-----+---+-----+
 *  31:12 - Page address
 *  11:8  - Reserved
 *   7:4  - Authority Control Index
 *   3:2  - Reserved
 *     1  - 1 if the entry is valid
 *     0  - Reserved
 *
 * The way permissions work is that the IOMMU has 16 "domains" that
 * can be configured to grant each master either read or write
 * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0
 * seems to be the default domain, and its permissions in the
 * IOMMU_DM_AUT_CTRL_REG are only read-only, so it's not really
 * useful to enforce any particular permission.
 *
 * Each page table entry then carries a reference to the domain it is
 * assigned to, so that we can actually enforce permissions on a
 * per-page basis.
 *
 * In order to make it work with the IOMMU framework, we will be using
 * 4 different domains, starting at 1: RD_WR, RD, WR and NONE,
 * depending on the permission we want to enforce. Each domain will
 * have each master set up in the same way, since the IOMMU framework
 * doesn't seem to restrict page access on a per-device basis. And
 * then we will use the relevant domain index when generating the page
 * table entry depending on the permissions we want to be enforced.
 */

enum sun50i_iommu_aci {
	SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
	SUN50I_IOMMU_ACI_NONE,
	SUN50I_IOMMU_ACI_RD,
	SUN50I_IOMMU_ACI_WR,
	SUN50I_IOMMU_ACI_RD_WR,
};

#define SUN50I_PTE_PAGE_ADDRESS_MASK	GENMASK(31, 12)
#define SUN50I_PTE_ACI_MASK		GENMASK(7, 4)
#define SUN50I_PTE_PAGE_VALID		BIT(1)

static phys_addr_t sun50i_pte_get_page_address(u32 pte)
{
	return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
}

static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
{
	return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
}

static bool sun50i_pte_is_page_valid(u32 pte)
{
	return pte & SUN50I_PTE_PAGE_VALID;
}

static u32 sun50i_mk_pte(phys_addr_t page, int prot)
{
	enum sun50i_iommu_aci aci;
	u32 flags = 0;

	if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
		aci = SUN50I_IOMMU_ACI_RD_WR;
	else if (prot & IOMMU_READ)
		aci = SUN50I_IOMMU_ACI_RD;
	else if (prot & IOMMU_WRITE)
		aci = SUN50I_IOMMU_ACI_WR;
	else
		aci = SUN50I_IOMMU_ACI_NONE;

	flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
	page &= SUN50I_PTE_PAGE_ADDRESS_MASK;

	return page | flags | SUN50I_PTE_PAGE_VALID;
}
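
/*
 * Worked example of the entry encodings (derived from the helpers
 * above, not a dump from real hardware): sun50i_mk_dte(0x40001400)
 * keeps bits 31:10 of the PT address and ORs in the valid bit,
 * yielding 0x40001401. sun50i_mk_pte(0x40002000, IOMMU_READ |
 * IOMMU_WRITE) selects SUN50I_IOMMU_ACI_RD_WR (4) in bits 7:4 and
 * sets the valid bit, yielding 0x40002042.
 */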

static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
			       void *vaddr, unsigned int count)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	dma_addr_t dma = virt_to_phys(vaddr);
	size_t size = count * PT_ENTRY_SIZE;

	dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}

static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
				  unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
	iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
		    IOMMU_TLB_IVLD_ENABLE_ENABLE);

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB invalidation timed out!\n");
}

static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
				       unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
		    IOMMU_PC_IVLD_ENABLE_ENABLE);

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
}

static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
				   unsigned long iova, size_t size)
{
	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);

	sun50i_iommu_zap_iova(iommu, iova);
	sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
	if (size > SPAGE_SIZE) {
		sun50i_iommu_zap_iova(iommu, iova + size);
		sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
	}
	sun50i_iommu_zap_ptw_cache(iommu, iova);
	sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
	if (size > SZ_1M) {
		sun50i_iommu_zap_ptw_cache(iommu, iova + size);
		sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
}

static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
	u32 reg;
	int ret;

	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu,
		    IOMMU_TLB_FLUSH_REG,
		    IOMMU_TLB_FLUSH_PTW_CACHE |
		    IOMMU_TLB_FLUSH_MACRO_TLB |
		    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(0));

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
					reg, !reg,
					1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB Flush timed out!\n");

	return ret;
}

static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	/*
	 * At boot, we'll have a first call into .flush_iotlb_all right after
	 * .probe_device, and since we link our (single) domain to our iommu in
	 * the .attach_device callback, we don't have that pointer set.
	 *
	 * It shouldn't really be any trouble to ignore it though since we flush
	 * all caches as part of the device powerup.
	 */
	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_flush_all_tlb(iommu);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}

static int sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
				       unsigned long iova, size_t size)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_zap_range(iommu, iova, size);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;
}

static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *gather)
{
	sun50i_iommu_flush_iotlb_all(domain);
}
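
/*
 * Hardware power-up sequence, as implemented below: deassert the
 * reset line, enable the clock, then, under the register lock,
 * program the DT address into IOMMU_TTB_REG, enable TLB prefetch and
 * interrupts for all six masters, configure the ACI domains (NONE can
 * neither read nor write, RD cannot write, WR cannot read), flush
 * every cache, and finally enable automatic clock gating and the
 * IOMMU itself.
 */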

static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
	struct sun50i_iommu_domain *sun50i_domain;
	unsigned long flags;
	int ret;

	if (!iommu->domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);

	ret = reset_control_deassert(iommu->reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(iommu->clk);
	if (ret)
		goto err_reset_assert;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
	iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

	ret = sun50i_iommu_flush_all_tlb(iommu);
	if (ret) {
		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
		goto err_clk_disable;
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
	iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;

err_clk_disable:
	clk_disable_unprepare(iommu->clk);

err_reset_assert:
	reset_control_assert(iommu->reset);

	return ret;
}

static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_ENABLE_REG, 0);
	iommu_write(iommu, IOMMU_TTB_REG, 0);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	clk_disable_unprepare(iommu->clk);
	reset_control_assert(iommu->reset);
}

static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
					   gfp_t gfp)
{
	dma_addr_t pt_dma;
	u32 *page_table;

	page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, pt_dma)) {
		dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
		kmem_cache_free(iommu->pt_pool, page_table);
		return ERR_PTR(-ENOMEM);
	}

	/* We rely on the physical address and DMA address being the same */
	WARN_ON(pt_dma != virt_to_phys(page_table));

	return page_table;
}

static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
					 u32 *page_table)
{
	phys_addr_t pt_phys = virt_to_phys(page_table);

	dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
	kmem_cache_free(iommu->pt_pool, page_table);
}
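
/*
 * Return the L2 page table backing @iova, allocating one if the DTE
 * is not valid yet. The new DTE is installed with cmpxchg() so that
 * two concurrent callers cannot clobber each other: the loser of the
 * race frees its freshly allocated table and uses the winner's
 * instead. Both the page table and the DTE are then flushed so the
 * hardware page-table walker sees consistent entries.
 */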

static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
				      dma_addr_t iova, gfp_t gfp)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 *page_table;
	u32 *dte_addr;
	u32 old_dte;
	u32 dte;

	dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	dte = *dte_addr;
	if (sun50i_dte_is_pt_valid(dte)) {
		phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);

		return (u32 *)phys_to_virt(pt_phys);
	}

	page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
	if (IS_ERR(page_table))
		return page_table;

	dte = sun50i_mk_dte(virt_to_phys(page_table));
	old_dte = cmpxchg(dte_addr, 0, dte);
	if (old_dte) {
		phys_addr_t installed_pt_phys =
			sun50i_dte_get_pt_address(old_dte);
		u32 *installed_pt = phys_to_virt(installed_pt_phys);
		u32 *drop_pt = page_table;

		page_table = installed_pt;
		dte = old_dte;
		sun50i_iommu_free_page_table(iommu, drop_pt);
	}

	sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
	sun50i_table_flush(sun50i_domain, dte_addr, 1);

	return page_table;
}

static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, size_t count,
			    int prot, gfp_t gfp, size_t *mapped)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 pte_index;
	u32 *page_table, *pte_addr;
	int ret = 0;

	page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
	if (IS_ERR(page_table)) {
		ret = PTR_ERR(page_table);
		goto out;
	}

	pte_index = sun50i_iova_get_pte_index(iova);
	pte_addr = &page_table[pte_index];
	if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
		phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);

		dev_err(iommu->dev,
			"iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
			&iova, &page_phys, &paddr, prot);
		ret = -EBUSY;
		goto out;
	}

	*pte_addr = sun50i_mk_pte(paddr, prot);
	sun50i_table_flush(sun50i_domain, pte_addr, 1);
	*mapped = size;

out:
	return ret;
}

static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t size, size_t count,
				 struct iommu_iotlb_gather *gather)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *pte_addr;
	u32 dte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);

	if (!sun50i_pte_is_page_valid(*pte_addr))
		return 0;

	memset(pte_addr, 0, sizeof(*pte_addr));
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

	return SZ_4K;
}

static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *page_table;
	u32 dte, pte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[sun50i_iova_get_pte_index(iova)];
	if (!sun50i_pte_is_page_valid(pte))
		return 0;

	return sun50i_pte_get_page_address(pte) +
	       sun50i_iova_get_page_offset(iova);
}

static struct iommu_domain *
sun50i_iommu_domain_alloc_paging(struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain;

	sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
	if (!sun50i_domain)
		return NULL;

	sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(DT_SIZE));
	if (!sun50i_domain->dt)
		goto err_free_domain;

	refcount_set(&sun50i_domain->refcnt, 1);

	sun50i_domain->domain.geometry.aperture_start = 0;
	sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	sun50i_domain->domain.geometry.force_aperture = true;

	return &sun50i_domain->domain;

err_free_domain:
	kfree(sun50i_domain);

	return NULL;
}

static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

	free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
	sun50i_domain->dt = NULL;

	kfree(sun50i_domain);
}
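
/*
 * The hardware walks a single Directory Table, pointed to by
 * IOMMU_TTB_REG, so only one paging domain can be wired to the IOMMU
 * at a time. Attaching maps the DT for DMA and powers the IOMMU up;
 * detaching tears down every L2 table and disables the hardware
 * again.
 */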

static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
				      struct sun50i_iommu_domain *sun50i_domain)
{
	iommu->domain = &sun50i_domain->domain;
	sun50i_domain->iommu = iommu;

	sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
					       DT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
		dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
		return -ENOMEM;
	}

	return sun50i_iommu_enable(iommu);
}

static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
				       struct sun50i_iommu_domain *sun50i_domain)
{
	unsigned int i;

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		phys_addr_t pt_phys;
		u32 *page_table;
		u32 *dte_addr;
		u32 dte;

		dte_addr = &sun50i_domain->dt[i];
		dte = *dte_addr;
		if (!sun50i_dte_is_pt_valid(dte))
			continue;

		memset(dte_addr, 0, sizeof(*dte_addr));
		sun50i_table_flush(sun50i_domain, dte_addr, 1);

		pt_phys = sun50i_dte_get_pt_address(dte);
		page_table = phys_to_virt(pt_phys);
		sun50i_iommu_free_page_table(iommu, page_table);
	}

	sun50i_iommu_disable(iommu);

	dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
			 DT_SIZE, DMA_TO_DEVICE);

	iommu->domain = NULL;
}

static int sun50i_iommu_identity_attach(struct iommu_domain *identity_domain,
					struct device *dev)
{
	struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
	struct sun50i_iommu_domain *sun50i_domain;

	dev_dbg(dev, "Detaching from IOMMU domain\n");

	if (iommu->domain == identity_domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);
	if (refcount_dec_and_test(&sun50i_domain->refcnt))
		sun50i_iommu_detach_domain(iommu, sun50i_domain);

	return 0;
}

static struct iommu_domain_ops sun50i_iommu_identity_ops = {
	.attach_dev = sun50i_iommu_identity_attach,
};

static struct iommu_domain sun50i_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &sun50i_iommu_identity_ops,
};

static int sun50i_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return -ENODEV;

	dev_dbg(dev, "Attaching to IOMMU domain\n");

	refcount_inc(&sun50i_domain->refcnt);

	if (iommu->domain == domain)
		return 0;

	sun50i_iommu_identity_attach(&sun50i_iommu_identity_domain, dev);

	sun50i_iommu_attach_domain(iommu, sun50i_domain);

	return 0;
}

static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
{
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static int sun50i_iommu_of_xlate(struct device *dev,
				 const struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	unsigned id = args->args[0];

	dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));

	return iommu_fwspec_add_ids(dev, &id, 1);
}
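
/*
 * Masters reference the IOMMU with a single cell holding their master
 * ID, as consumed by sun50i_iommu_of_xlate() above. An illustrative
 * device tree snippet (node names and unit address are examples, not
 * taken from a real DT):
 *
 *	iommu: iommu@30f0000 {
 *		compatible = "allwinner,sun50i-h6-iommu";
 *		...
 *		#iommu-cells = <1>;
 *	};
 *
 *	mixer0 {
 *		...
 *		iommus = <&iommu 0>;
 *	};
 */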

static const struct iommu_ops sun50i_iommu_ops = {
	.identity_domain = &sun50i_iommu_identity_domain,
	.pgsize_bitmap = SZ_4K,
	.device_group = generic_single_device_group,
	.domain_alloc_paging = sun50i_iommu_domain_alloc_paging,
	.of_xlate = sun50i_iommu_of_xlate,
	.probe_device = sun50i_iommu_probe_device,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= sun50i_iommu_attach_device,
		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
		.iotlb_sync_map	= sun50i_iommu_iotlb_sync_map,
		.iotlb_sync	= sun50i_iommu_iotlb_sync,
		.iova_to_phys	= sun50i_iommu_iova_to_phys,
		.map_pages	= sun50i_iommu_map,
		.unmap_pages	= sun50i_iommu_unmap,
		.free		= sun50i_iommu_domain_free,
	}
};

static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
				      unsigned master, phys_addr_t iova,
				      unsigned prot)
{
	dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
		&iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");

	if (iommu->domain)
		report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
	else
		dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");

	sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
}

static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
					      unsigned addr_reg,
					      unsigned blame_reg)
{
	phys_addr_t iova;
	unsigned master;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	iova = iommu_read(iommu, addr_reg);
	blame = iommu_read(iommu, blame_reg);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);

	return iova;
}

static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
{
	enum sun50i_iommu_aci aci;
	phys_addr_t iova;
	unsigned master;
	unsigned dir;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	blame = iommu_read(iommu, IOMMU_INT_STA_REG);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);
	iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
	aci = sun50i_get_pte_aci(iommu_read(iommu,
					    IOMMU_INT_ERR_DATA_REG(master)));

	switch (aci) {
	/*
	 * If we are in the read-only domain, then it means we
	 * tried to write.
	 */
	case SUN50I_IOMMU_ACI_RD:
		dir = IOMMU_FAULT_WRITE;
		break;

	/*
	 * If we are in the write-only domain, then it means
	 * we tried to read.
	 */
	case SUN50I_IOMMU_ACI_WR:

	/*
	 * If we are in the domain without any permission, we
	 * can't really tell. Let's default to a read
	 * operation.
	 */
	case SUN50I_IOMMU_ACI_NONE:

	/* WTF? */
	case SUN50I_IOMMU_ACI_RD_WR:
	default:
		dir = IOMMU_FAULT_READ;
		break;
	}

	sun50i_iommu_report_fault(iommu, master, iova, dir);

	return iova;
}
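
/*
 * Top-level interrupt handler: read the fault status, dispatch to the
 * L1/L2 page-table fault handlers or the permission fault handler,
 * clear the interrupt, then pulse the faulting masters' bits low in
 * IOMMU_RESET_REG so they can retry the access.
 */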

static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
	u32 status, l1_status, l2_status, resets;
	struct sun50i_iommu *iommu = dev_id;

	spin_lock(&iommu->iommu_lock);

	status = iommu_read(iommu, IOMMU_INT_STA_REG);
	if (!(status & IOMMU_INT_MASK)) {
		spin_unlock(&iommu->iommu_lock);
		return IRQ_NONE;
	}

	l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG);
	l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG);

	if (status & IOMMU_INT_INVALID_L2PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L2_REG,
					   IOMMU_L2PG_INT_REG);
	else if (status & IOMMU_INT_INVALID_L1PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L1_REG,
					   IOMMU_L1PG_INT_REG);
	else
		sun50i_iommu_handle_perm_irq(iommu);

	iommu_write(iommu, IOMMU_INT_CLR_REG, status);

	resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK;
	iommu_write(iommu, IOMMU_RESET_REG, ~resets);
	iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL);

	spin_unlock(&iommu->iommu_lock);

	return IRQ_HANDLED;
}
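
/*
 * Note that the L2 page tables come from a kmem_cache created below
 * with both a size and an alignment of PT_SIZE (1kB): the DTE only
 * stores bits 31:10 of a page table's address, so every table must be
 * naturally aligned to 1kB.
 */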

static int sun50i_iommu_probe(struct platform_device *pdev)
{
	struct sun50i_iommu *iommu;
	int ret, irq;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;
	spin_lock_init(&iommu->iommu_lock);
	iommu->domain = &sun50i_iommu_identity_domain;
	platform_set_drvdata(pdev, iommu);
	iommu->dev = &pdev->dev;

	iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
					   PT_SIZE, PT_SIZE,
					   SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!iommu->pt_pool)
		return -ENOMEM;

	iommu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iommu->base)) {
		ret = PTR_ERR(iommu->base);
		goto err_free_cache;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_cache;
	}

	iommu->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->clk)) {
		dev_err(&pdev->dev, "Couldn't get our clock.\n");
		ret = PTR_ERR(iommu->clk);
		goto err_free_cache;
	}

	iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->reset)) {
		dev_err(&pdev->dev, "Couldn't get our reset line.\n");
		ret = PTR_ERR(iommu->reset);
		goto err_free_cache;
	}

	ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
				     NULL, dev_name(&pdev->dev));
	if (ret)
		goto err_free_cache;

	ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
	if (ret)
		goto err_remove_sysfs;

	ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
			       dev_name(&pdev->dev), iommu);
	if (ret < 0)
		goto err_unregister;

	return 0;

err_unregister:
	iommu_device_unregister(&iommu->iommu);

err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);

err_free_cache:
	kmem_cache_destroy(iommu->pt_pool);

	return ret;
}

static const struct of_device_id sun50i_iommu_dt[] = {
	{ .compatible = "allwinner,sun50i-h6-iommu", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);

static struct platform_driver sun50i_iommu_driver = {
	.driver	= {
		.name			= "sun50i-iommu",
		.of_match_table		= sun50i_iommu_dt,
		.suppress_bind_attrs	= true,
	}
};
builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);

MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
MODULE_LICENSE("Dual BSD/GPL");