/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stall, re-enabling paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000

struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
	struct iommu_group *group;
};

struct rk_iommudata {
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
};

static struct device *dma_dev;

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* size of 'count' u32 entries */

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4-bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
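
/*
 * Worked example with an arbitrary address: for a page table allocated at
 * pt_dma = 0x12345000, rk_mk_dte() yields 0x12345001 - the 4 KB-aligned PT
 * address in bits 31:12 with the valid bit set - and
 * rk_dte_pt_address(0x12345001) recovers 0x12345000.
 */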
/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
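
/*
 * Worked example with an arbitrary address: iova 0x12345678 decomposes into
 * DTE index 0x048 (bits 31:22), PTE index 0x345 (bits 21:12) and page offset
 * 0x678 (bits 11:0), so a translation reads dt[0x048], then pt[0x345], then
 * adds 0x678 to the 4 KB page address found there.
 */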
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}
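
/*
 * One rk_iommu device may front several identical MMU register banks
 * (iommu->num_mmu) that share a single page table, so the status predicates
 * above and the reset-done check below AND the per-bank results together:
 * the condition holds only once every bank agrees.
 */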
static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}
static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that upper 5 nybbles are read back.
	 */
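	/*
	 * Concretely: DTE_ADDR_DUMMY is 0xCAFEBABE and the register keeps
	 * only the address bits (RK_DTE_PT_ADDRESS_MASK, 0xfffff000), so a
	 * functional MMU reads back 0xCAFEB000 - the upper five nybbles.
	 */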
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i, err;

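	/*
	 * pm_runtime_get_if_in_use() only takes a usage-count reference if
	 * the IOMMU is currently runtime-active: it returns a positive value
	 * on success, 0 if the device is not in use, and a negative errno if
	 * runtime PM is disabled. An interrupt from a powered-down IOMMU is
	 * unexpected, hence the WARN below.
	 */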
	err = pm_runtime_get_if_in_use(iommu->dev);
	if (WARN_ON_ONCE(err <= 0))
		return ret;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap
			 * cache and clear the page fault anyway.
			 */
558c68a2921SDaniel Kurtz */ 559c68a2921SDaniel Kurtz if (iommu->domain) 560c68a2921SDaniel Kurtz report_iommu_fault(iommu->domain, iommu->dev, iova, 561c68a2921SDaniel Kurtz flags); 562c68a2921SDaniel Kurtz else 563c68a2921SDaniel Kurtz dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); 564c68a2921SDaniel Kurtz 565cd6438c5SZhengShunQian rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); 566cd6438c5SZhengShunQian rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE); 567c68a2921SDaniel Kurtz } 568c68a2921SDaniel Kurtz 569c68a2921SDaniel Kurtz if (int_status & RK_MMU_IRQ_BUS_ERROR) 570c68a2921SDaniel Kurtz dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova); 571c68a2921SDaniel Kurtz 572c68a2921SDaniel Kurtz if (int_status & ~RK_MMU_IRQ_MASK) 573c68a2921SDaniel Kurtz dev_err(iommu->dev, "unexpected int_status: %#08x\n", 574c68a2921SDaniel Kurtz int_status); 575c68a2921SDaniel Kurtz 576cd6438c5SZhengShunQian rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status); 577cd6438c5SZhengShunQian } 578c68a2921SDaniel Kurtz 579f2e3a5f5STomasz Figa clk_bulk_disable(iommu->num_clocks, iommu->clocks); 580f2e3a5f5STomasz Figa 5810f181d3cSJeffy Chen out: 5820f181d3cSJeffy Chen pm_runtime_put(iommu->dev); 583cd6438c5SZhengShunQian return ret; 584c68a2921SDaniel Kurtz } 585c68a2921SDaniel Kurtz 586c68a2921SDaniel Kurtz static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, 587c68a2921SDaniel Kurtz dma_addr_t iova) 588c68a2921SDaniel Kurtz { 589bcd516a3SJoerg Roedel struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 590c68a2921SDaniel Kurtz unsigned long flags; 591c68a2921SDaniel Kurtz phys_addr_t pt_phys, phys = 0; 592c68a2921SDaniel Kurtz u32 dte, pte; 593c68a2921SDaniel Kurtz u32 *page_table; 594c68a2921SDaniel Kurtz 595c68a2921SDaniel Kurtz spin_lock_irqsave(&rk_domain->dt_lock, flags); 596c68a2921SDaniel Kurtz 597c68a2921SDaniel Kurtz dte = rk_domain->dt[rk_iova_dte_index(iova)]; 598c68a2921SDaniel Kurtz if (!rk_dte_is_pt_valid(dte)) 599c68a2921SDaniel Kurtz goto out; 600c68a2921SDaniel Kurtz 601c68a2921SDaniel Kurtz pt_phys = rk_dte_pt_address(dte); 602c68a2921SDaniel Kurtz page_table = (u32 *)phys_to_virt(pt_phys); 603c68a2921SDaniel Kurtz pte = page_table[rk_iova_pte_index(iova)]; 604c68a2921SDaniel Kurtz if (!rk_pte_is_page_valid(pte)) 605c68a2921SDaniel Kurtz goto out; 606c68a2921SDaniel Kurtz 607c68a2921SDaniel Kurtz phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova); 608c68a2921SDaniel Kurtz out: 609c68a2921SDaniel Kurtz spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 610c68a2921SDaniel Kurtz 611c68a2921SDaniel Kurtz return phys; 612c68a2921SDaniel Kurtz } 613c68a2921SDaniel Kurtz 614c68a2921SDaniel Kurtz static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, 615c68a2921SDaniel Kurtz dma_addr_t iova, size_t size) 616c68a2921SDaniel Kurtz { 617c68a2921SDaniel Kurtz struct list_head *pos; 618c68a2921SDaniel Kurtz unsigned long flags; 619c68a2921SDaniel Kurtz 620c68a2921SDaniel Kurtz /* shootdown these iova from all iommus using this domain */ 621c68a2921SDaniel Kurtz spin_lock_irqsave(&rk_domain->iommus_lock, flags); 622c68a2921SDaniel Kurtz list_for_each(pos, &rk_domain->iommus) { 623c68a2921SDaniel Kurtz struct rk_iommu *iommu; 624*3fc7c5c0SMarc Zyngier int ret; 6250f181d3cSJeffy Chen 626c68a2921SDaniel Kurtz iommu = list_entry(pos, struct rk_iommu, node); 6270f181d3cSJeffy Chen 6280f181d3cSJeffy Chen /* Only zap TLBs of IOMMUs that are powered on. 
		ret = pm_runtime_get_if_in_use(iommu->dev);
		if (WARN_ON_ONCE(ret < 0))
			continue;
		if (ret) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
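	/*
	 * For example, mapping 16 KiB at iova 0x10000 installs PTEs for
	 * 0x10000..0x13000; only the pages at 0x10000 and 0x13000 can sit in
	 * a hardware-fetched line of DTEs/PTEs that a neighbouring, existing
	 * mapping already pulled into the IOTLB, so zapping those two
	 * suffices. Interior pages were necessarily unmapped beforehand (the
	 * loop above fails with -EADDRINUSE otherwise), so they cannot be
	 * cached.
	 */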
740d4dd920cSTomasz Figa */ 741d4dd920cSTomasz Figa rk_iommu_zap_iova_first_last(rk_domain, iova, size); 742d4dd920cSTomasz Figa 743c68a2921SDaniel Kurtz return 0; 744c68a2921SDaniel Kurtz unwind: 745c68a2921SDaniel Kurtz /* Unmap the range of iovas that we just mapped */ 7464f0aba67SShunqian Zheng rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, 7474f0aba67SShunqian Zheng pte_count * SPAGE_SIZE); 748c68a2921SDaniel Kurtz 749c68a2921SDaniel Kurtz iova += pte_count * SPAGE_SIZE; 750c68a2921SDaniel Kurtz page_phys = rk_pte_page_address(pte_addr[pte_count]); 751c68a2921SDaniel Kurtz pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", 752c68a2921SDaniel Kurtz &iova, &page_phys, &paddr, prot); 753c68a2921SDaniel Kurtz 754c68a2921SDaniel Kurtz return -EADDRINUSE; 755c68a2921SDaniel Kurtz } 756c68a2921SDaniel Kurtz 757c68a2921SDaniel Kurtz static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, 758c68a2921SDaniel Kurtz phys_addr_t paddr, size_t size, int prot) 759c68a2921SDaniel Kurtz { 760bcd516a3SJoerg Roedel struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 761c68a2921SDaniel Kurtz unsigned long flags; 7624f0aba67SShunqian Zheng dma_addr_t pte_dma, iova = (dma_addr_t)_iova; 763c68a2921SDaniel Kurtz u32 *page_table, *pte_addr; 7644f0aba67SShunqian Zheng u32 dte_index, pte_index; 765c68a2921SDaniel Kurtz int ret; 766c68a2921SDaniel Kurtz 767c68a2921SDaniel Kurtz spin_lock_irqsave(&rk_domain->dt_lock, flags); 768c68a2921SDaniel Kurtz 769c68a2921SDaniel Kurtz /* 770c68a2921SDaniel Kurtz * pgsize_bitmap specifies iova sizes that fit in one page table 771c68a2921SDaniel Kurtz * (1024 4-KiB pages = 4 MiB). 772c68a2921SDaniel Kurtz * So, size will always be 4096 <= size <= 4194304. 773c68a2921SDaniel Kurtz * Since iommu_map() guarantees that both iova and size will be 774c68a2921SDaniel Kurtz * aligned, we will always only be mapping from a single dte here. 
775c68a2921SDaniel Kurtz */ 776c68a2921SDaniel Kurtz page_table = rk_dte_get_page_table(rk_domain, iova); 777c68a2921SDaniel Kurtz if (IS_ERR(page_table)) { 778c68a2921SDaniel Kurtz spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 779c68a2921SDaniel Kurtz return PTR_ERR(page_table); 780c68a2921SDaniel Kurtz } 781c68a2921SDaniel Kurtz 7824f0aba67SShunqian Zheng dte_index = rk_domain->dt[rk_iova_dte_index(iova)]; 7834f0aba67SShunqian Zheng pte_index = rk_iova_pte_index(iova); 7844f0aba67SShunqian Zheng pte_addr = &page_table[pte_index]; 7854f0aba67SShunqian Zheng pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32); 7864f0aba67SShunqian Zheng ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova, 7874f0aba67SShunqian Zheng paddr, size, prot); 7884f0aba67SShunqian Zheng 789c68a2921SDaniel Kurtz spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 790c68a2921SDaniel Kurtz 791c68a2921SDaniel Kurtz return ret; 792c68a2921SDaniel Kurtz } 793c68a2921SDaniel Kurtz 794c68a2921SDaniel Kurtz static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, 795c68a2921SDaniel Kurtz size_t size) 796c68a2921SDaniel Kurtz { 797bcd516a3SJoerg Roedel struct rk_iommu_domain *rk_domain = to_rk_domain(domain); 798c68a2921SDaniel Kurtz unsigned long flags; 7994f0aba67SShunqian Zheng dma_addr_t pte_dma, iova = (dma_addr_t)_iova; 800c68a2921SDaniel Kurtz phys_addr_t pt_phys; 801c68a2921SDaniel Kurtz u32 dte; 802c68a2921SDaniel Kurtz u32 *pte_addr; 803c68a2921SDaniel Kurtz size_t unmap_size; 804c68a2921SDaniel Kurtz 805c68a2921SDaniel Kurtz spin_lock_irqsave(&rk_domain->dt_lock, flags); 806c68a2921SDaniel Kurtz 807c68a2921SDaniel Kurtz /* 808c68a2921SDaniel Kurtz * pgsize_bitmap specifies iova sizes that fit in one page table 809c68a2921SDaniel Kurtz * (1024 4-KiB pages = 4 MiB). 810c68a2921SDaniel Kurtz * So, size will always be 4096 <= size <= 4194304. 811c68a2921SDaniel Kurtz * Since iommu_unmap() guarantees that both iova and size will be 812c68a2921SDaniel Kurtz * aligned, we will always only be unmapping from a single dte here. 813c68a2921SDaniel Kurtz */ 814c68a2921SDaniel Kurtz dte = rk_domain->dt[rk_iova_dte_index(iova)]; 815c68a2921SDaniel Kurtz /* Just return 0 if iova is unmapped */ 816c68a2921SDaniel Kurtz if (!rk_dte_is_pt_valid(dte)) { 817c68a2921SDaniel Kurtz spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 818c68a2921SDaniel Kurtz return 0; 819c68a2921SDaniel Kurtz } 820c68a2921SDaniel Kurtz 821c68a2921SDaniel Kurtz pt_phys = rk_dte_pt_address(dte); 822c68a2921SDaniel Kurtz pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); 8234f0aba67SShunqian Zheng pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32); 8244f0aba67SShunqian Zheng unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size); 825c68a2921SDaniel Kurtz 826c68a2921SDaniel Kurtz spin_unlock_irqrestore(&rk_domain->dt_lock, flags); 827c68a2921SDaniel Kurtz 828c68a2921SDaniel Kurtz /* Shootdown iotlb entries for iova range that was just unmapped */ 829c68a2921SDaniel Kurtz rk_iommu_zap_iova(rk_domain, iova, unmap_size); 830c68a2921SDaniel Kurtz 831c68a2921SDaniel Kurtz return unmap_size; 832c68a2921SDaniel Kurtz } 833c68a2921SDaniel Kurtz 834c68a2921SDaniel Kurtz static struct rk_iommu *rk_iommu_from_dev(struct device *dev) 835c68a2921SDaniel Kurtz { 8365fd577c3SJeffy Chen struct rk_iommudata *data = dev->archdata.iommu; 837c68a2921SDaniel Kurtz 8385fd577c3SJeffy Chen return data ? 
/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
{
	int i;

	/* Ignore error while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}

/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/* Allow 'virtual devices' (eg drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	dev_dbg(dev, "Detaching from iommu domain\n");

	/* iommu already detached */
	if (iommu->domain != domain)
		return;

	iommu->domain = NULL;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	WARN_ON_ONCE(ret < 0);
	if (ret > 0) {
		rk_iommu_disable(iommu);
		pm_runtime_put(iommu->dev);
	}
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	dev_dbg(dev, "Attaching to iommu domain\n");

	/* iommu already attached */
	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	if (!ret || WARN_ON_ONCE(ret < 0))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		return NULL;

	/*
	 * rk32xx iommus use a 2-level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
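	/*
	 * Size check: the DT is one 4 KiB page up front; each PT is another
	 * 4 KiB page allocated on demand in rk_dte_get_page_table(). A fully
	 * populated domain therefore spends 4 KiB + 1024 * 4 KiB (~4 MiB) of
	 * table memory to map the whole 32-bit, 4 GiB iova space.
	 */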
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
}
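
/*
 * The device link created below (with DL_FLAG_PM_RUNTIME) ties the master's
 * runtime PM to the IOMMU's: whenever a master device runtime-resumes, the
 * core resumes its IOMMU first, so the IOMMU is powered (and, if attached,
 * reprogrammed via rk_iommu_resume()) before the master can issue DMA.
 */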
static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	struct rk_iommudata *data;

	data = dev->archdata.iommu;
	if (!data)
		return -ENODEV;

	iommu = rk_iommu_from_dev(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);
	iommu_group_put(group);

	iommu_device_link(&iommu->iommu, dev);
	data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);

	return 0;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommudata *data = dev->archdata.iommu;

	iommu = rk_iommu_from_dev(dev);

	device_link_del(data->link);
	iommu_device_unlink(&iommu->iommu, dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
	struct rk_iommu *iommu;

	iommu = rk_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int rk_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	dev->archdata.iommu = data;

	platform_device_put(iommu_dev);

	return 0;
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
};

static int rk_iommu_probe(struct platform_device *pdev)

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i, irq;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	i = 0;
	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err)
			return err;
	}

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];
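
	/*
	 * The ids filled in above let the single devm_clk_bulk_get() call
	 * below look up both clocks at once. They are prepared here for
	 * the lifetime of the device and only enabled around actual
	 * register access elsewhere in this file (e.g. in
	 * rk_iommu_enable() and rk_iommu_disable()).
	 */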
	/*
	 * iommu clocks should be present for all new devices and devicetrees
	 * but there are older devicetrees without clocks out in the wild.
	 * So treat clocks as optional for the time being.
	 */
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err == -ENOENT)
		iommu->num_clocks = 0;
	else if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
		goto err_unprepare_clocks;
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_put_group;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);

	err = iommu_device_register(&iommu->iommu);
	if (err)
		goto err_remove_sysfs;

	/*
	 * Use the first registered IOMMU device for domain to use with DMA
	 * API, since a domain might not physically correspond to a single
	 * IOMMU device.
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

	pm_runtime_enable(dev);

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
	iommu_group_put(iommu->group);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}
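
/*
 * Illustrative sketch only (hypothetical master driver, compiled out;
 * RK_IOMMU_DOC_EXAMPLE is a made-up guard): because rk_iommu_add_device()
 * creates a DL_FLAG_PM_RUNTIME device link from each master to its IOMMU,
 * the master's runtime-PM transitions pull the IOMMU along and the
 * callbacks above run without the master ever naming the IOMMU.
 */
#ifdef RK_IOMMU_DOC_EXAMPLE
static int rk_master_dma_sketch(struct device *master)
{
	int ret;

	/*
	 * Resuming the master resumes the linked IOMMU first, so
	 * rk_iommu_resume() re-enables translation if a domain is attached.
	 */
	ret = pm_runtime_get_sync(master);
	if (ret < 0) {
		pm_runtime_put_noidle(master);
		return ret;
	}

	/* ... program the master and run DMA through the IOMMU ... */

	/*
	 * Dropping the last reference lets runtime PM suspend the master
	 * and then the IOMMU, where rk_iommu_suspend() disables translation.
	 */
	pm_runtime_put(master);
	return 0;
}
#endif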

static const struct dev_pm_ops rk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .pm = &rk_iommu_pm_ops,
		   .suppress_bind_attrs = true,
	},
};

static int __init rk_iommu_init(void)
{
	return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);

IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu");

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");