xref: /linux/drivers/iommu/rockchip-iommu.c (revision 40fa84e10134ef5c892b628e02382349b5db3e0c)
1c68a2921SDaniel Kurtz /*
2c68a2921SDaniel Kurtz  * This program is free software; you can redistribute it and/or modify
3c68a2921SDaniel Kurtz  * it under the terms of the GNU General Public License version 2 as
4c68a2921SDaniel Kurtz  * published by the Free Software Foundation.
5c68a2921SDaniel Kurtz  */
6c68a2921SDaniel Kurtz 
7f2e3a5f5STomasz Figa #include <linux/clk.h>
8c68a2921SDaniel Kurtz #include <linux/compiler.h>
9c68a2921SDaniel Kurtz #include <linux/delay.h>
10c68a2921SDaniel Kurtz #include <linux/device.h>
114f0aba67SShunqian Zheng #include <linux/dma-iommu.h>
12461a6946SJoerg Roedel #include <linux/dma-mapping.h>
13c68a2921SDaniel Kurtz #include <linux/errno.h>
14c68a2921SDaniel Kurtz #include <linux/interrupt.h>
15c68a2921SDaniel Kurtz #include <linux/io.h>
16c68a2921SDaniel Kurtz #include <linux/iommu.h>
170416bf64STomasz Figa #include <linux/iopoll.h>
18c68a2921SDaniel Kurtz #include <linux/list.h>
19c68a2921SDaniel Kurtz #include <linux/mm.h>
20c68a2921SDaniel Kurtz #include <linux/module.h>
21c68a2921SDaniel Kurtz #include <linux/of.h>
225fd577c3SJeffy Chen #include <linux/of_iommu.h>
23c68a2921SDaniel Kurtz #include <linux/of_platform.h>
24c68a2921SDaniel Kurtz #include <linux/platform_device.h>
250f181d3cSJeffy Chen #include <linux/pm_runtime.h>
26c68a2921SDaniel Kurtz #include <linux/slab.h>
27c68a2921SDaniel Kurtz #include <linux/spinlock.h>
28c68a2921SDaniel Kurtz 
29c68a2921SDaniel Kurtz /* MMU register offsets */
30c68a2921SDaniel Kurtz #define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
31c68a2921SDaniel Kurtz #define RK_MMU_STATUS		0x04
32c68a2921SDaniel Kurtz #define RK_MMU_COMMAND		0x08
33c68a2921SDaniel Kurtz #define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
34c68a2921SDaniel Kurtz #define RK_MMU_ZAP_ONE_LINE	0x10	/* Shoot down one IOTLB entry */
35c68a2921SDaniel Kurtz #define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
36c68a2921SDaniel Kurtz #define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
37c68a2921SDaniel Kurtz #define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
38c68a2921SDaniel Kurtz #define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
39c68a2921SDaniel Kurtz #define RK_MMU_AUTO_GATING	0x24
40c68a2921SDaniel Kurtz 
41c68a2921SDaniel Kurtz #define DTE_ADDR_DUMMY		0xCAFEBABE
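/*
 * Scratch value used by rk_iommu_force_reset() to check that RK_MMU_DTE_ADDR
 * is functional: after writing DTE_ADDR_DUMMY, a working MMU reads back only
 * the address bits, i.e. 0xCAFEBABE & RK_DTE_PT_ADDRESS_MASK == 0xCAFEB000
 * (the upper five nybbles; the low 12 bits are dropped).
 */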
420416bf64STomasz Figa 
430416bf64STomasz Figa #define RK_MMU_POLL_PERIOD_US		100
440416bf64STomasz Figa #define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
450416bf64STomasz Figa #define RK_MMU_POLL_TIMEOUT_US		1000
46c68a2921SDaniel Kurtz 
47c68a2921SDaniel Kurtz /* RK_MMU_STATUS fields */
48c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
49c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
50c68a2921SDaniel Kurtz #define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
51c68a2921SDaniel Kurtz #define RK_MMU_STATUS_IDLE                 BIT(3)
52c68a2921SDaniel Kurtz #define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
53c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
54c68a2921SDaniel Kurtz #define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)
55c68a2921SDaniel Kurtz 
56c68a2921SDaniel Kurtz /* RK_MMU_COMMAND command values */
57c68a2921SDaniel Kurtz #define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
58c68a2921SDaniel Kurtz #define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
59c68a2921SDaniel Kurtz #define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
60c68a2921SDaniel Kurtz #define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stall; re-enables paging */
61c68a2921SDaniel Kurtz #define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
62c68a2921SDaniel Kurtz #define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
63c68a2921SDaniel Kurtz #define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */
64c68a2921SDaniel Kurtz 
65c68a2921SDaniel Kurtz /* RK_MMU_INT_* register fields */
66c68a2921SDaniel Kurtz #define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
67c68a2921SDaniel Kurtz #define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
68c68a2921SDaniel Kurtz #define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
69c68a2921SDaniel Kurtz 
70c68a2921SDaniel Kurtz #define NUM_DT_ENTRIES 1024
71c68a2921SDaniel Kurtz #define NUM_PT_ENTRIES 1024
72c68a2921SDaniel Kurtz 
73c68a2921SDaniel Kurtz #define SPAGE_ORDER 12
74c68a2921SDaniel Kurtz #define SPAGE_SIZE (1 << SPAGE_ORDER)
75c68a2921SDaniel Kurtz 
76c68a2921SDaniel Kurtz  /*
77c68a2921SDaniel Kurtz   * Support mapping any size that fits in one page table:
78c68a2921SDaniel Kurtz   *   4 KiB to 4 MiB
79c68a2921SDaniel Kurtz   */
80c68a2921SDaniel Kurtz #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
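/*
 * For illustration, 0x007ff000 is bits 12 through 22 set, i.e.
 * BIT(12) | BIT(13) | ... | BIT(22), so the supported mapping sizes are the
 * powers of two from 4 KiB (1 << 12) up to 4 MiB (1 << 22).
 */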
81c68a2921SDaniel Kurtz 
82c68a2921SDaniel Kurtz struct rk_iommu_domain {
83c68a2921SDaniel Kurtz 	struct list_head iommus;
84c68a2921SDaniel Kurtz 	u32 *dt; /* page directory table */
854f0aba67SShunqian Zheng 	dma_addr_t dt_dma;
86c68a2921SDaniel Kurtz 	spinlock_t iommus_lock; /* lock for iommus list */
87c68a2921SDaniel Kurtz 	spinlock_t dt_lock; /* lock for modifying page directory table */
88bcd516a3SJoerg Roedel 
89bcd516a3SJoerg Roedel 	struct iommu_domain domain;
90c68a2921SDaniel Kurtz };
91c68a2921SDaniel Kurtz 
92f2e3a5f5STomasz Figa /* list of clocks required by IOMMU */
93f2e3a5f5STomasz Figa static const char * const rk_iommu_clocks[] = {
94f2e3a5f5STomasz Figa 	"aclk", "iface",
95f2e3a5f5STomasz Figa };
96f2e3a5f5STomasz Figa 
97c68a2921SDaniel Kurtz struct rk_iommu {
98c68a2921SDaniel Kurtz 	struct device *dev;
99cd6438c5SZhengShunQian 	void __iomem **bases;
100cd6438c5SZhengShunQian 	int num_mmu;
101f2e3a5f5STomasz Figa 	struct clk_bulk_data *clocks;
102f2e3a5f5STomasz Figa 	int num_clocks;
103c3aa4742SSimon Xue 	bool reset_disabled;
104c9d9f239SJoerg Roedel 	struct iommu_device iommu;
105c68a2921SDaniel Kurtz 	struct list_head node; /* entry in rk_iommu_domain.iommus */
106c68a2921SDaniel Kurtz 	struct iommu_domain *domain; /* domain to which iommu is attached */
10757c26957SJeffy Chen 	struct iommu_group *group;
108c68a2921SDaniel Kurtz };
109c68a2921SDaniel Kurtz 
1105fd577c3SJeffy Chen struct rk_iommudata {
1110f181d3cSJeffy Chen 	struct device_link *link; /* runtime PM link from IOMMU to master */
1125fd577c3SJeffy Chen 	struct rk_iommu *iommu;
1135fd577c3SJeffy Chen };
1145fd577c3SJeffy Chen 
1159176a303SJeffy Chen static struct device *dma_dev;
1169176a303SJeffy Chen 
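/*
 * The CPU updates the page tables through its (possibly cached) kernel
 * mapping, while the MMU fetches them from memory over the bus, so every
 * table update must be synced back to memory.  rk_table_flush() below does
 * that for 'count' consecutive u32 entries starting at DMA address 'dma'.
 */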
1174f0aba67SShunqian Zheng static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
1184f0aba67SShunqian Zheng 				  unsigned int count)
119c68a2921SDaniel Kurtz {
1204f0aba67SShunqian Zheng 	size_t size = count * sizeof(u32); /* count of u32 entries */
121c68a2921SDaniel Kurtz 
1229176a303SJeffy Chen 	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
123c68a2921SDaniel Kurtz }
124c68a2921SDaniel Kurtz 
125bcd516a3SJoerg Roedel static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
126bcd516a3SJoerg Roedel {
127bcd516a3SJoerg Roedel 	return container_of(dom, struct rk_iommu_domain, domain);
128bcd516a3SJoerg Roedel }
129bcd516a3SJoerg Roedel 
130c68a2921SDaniel Kurtz /*
131c68a2921SDaniel Kurtz  * The Rockchip rk3288 iommu uses a 2-level page table.
132c68a2921SDaniel Kurtz  * The first level is the "Directory Table" (DT).
133c68a2921SDaniel Kurtz  * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
134c68a2921SDaniel Kurtz  * to a "Page Table".
135c68a2921SDaniel Kurtz  * The second level consists of up to 1024 Page Tables (PTs).
136c68a2921SDaniel Kurtz  * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
137c68a2921SDaniel Kurtz  * a 4 KB page of physical memory.
138c68a2921SDaniel Kurtz  *
139c68a2921SDaniel Kurtz  * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
140c68a2921SDaniel Kurtz  * Each iommu device has an MMU_DTE_ADDR register that contains the physical
141c68a2921SDaniel Kurtz  * address of the start of the DT page.
142c68a2921SDaniel Kurtz  *
143c68a2921SDaniel Kurtz  * The structure of the page table is as follows:
144c68a2921SDaniel Kurtz  *
145c68a2921SDaniel Kurtz  *                   DT
146c68a2921SDaniel Kurtz  * MMU_DTE_ADDR -> +-----+
147c68a2921SDaniel Kurtz  *                 |     |
148c68a2921SDaniel Kurtz  *                 +-----+     PT
149c68a2921SDaniel Kurtz  *                 | DTE | -> +-----+
150c68a2921SDaniel Kurtz  *                 +-----+    |     |     Memory
151c68a2921SDaniel Kurtz  *                 |     |    +-----+     Page
152c68a2921SDaniel Kurtz  *                 |     |    | PTE | -> +-----+
153c68a2921SDaniel Kurtz  *                 +-----+    +-----+    |     |
154c68a2921SDaniel Kurtz  *                            |     |    |     |
155c68a2921SDaniel Kurtz  *                            |     |    |     |
156c68a2921SDaniel Kurtz  *                            +-----+    |     |
157c68a2921SDaniel Kurtz  *                                       |     |
158c68a2921SDaniel Kurtz  *                                       |     |
159c68a2921SDaniel Kurtz  *                                       +-----+
160c68a2921SDaniel Kurtz  */
161c68a2921SDaniel Kurtz 
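/*
 * For illustration (addresses are hypothetical), translating iova 0x12345678
 * walks the tables like this:
 *
 *	dte = dt[0x048];         // dt from MMU_DTE_ADDR, index = iova[31:22]
 *	pt  = phys_to_virt(dte & RK_DTE_PT_ADDRESS_MASK);
 *	pte = pt[0x345];         // index = iova[21:12]
 *	pa  = (pte & RK_PTE_PAGE_ADDRESS_MASK) | 0x678;  // offset = iova[11:0]
 *
 * This is the same walk that rk_iommu_iova_to_phys() performs in software.
 */
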
162c68a2921SDaniel Kurtz /*
163c68a2921SDaniel Kurtz  * Each DTE has a PT address and a valid bit:
164c68a2921SDaniel Kurtz  * +---------------------+-----------+-+
165c68a2921SDaniel Kurtz  * | PT address          | Reserved  |V|
166c68a2921SDaniel Kurtz  * +---------------------+-----------+-+
167c68a2921SDaniel Kurtz  *  31:12 - PT address (PTs always start on a 4 KB boundary)
168c68a2921SDaniel Kurtz  *  11: 1 - Reserved
169c68a2921SDaniel Kurtz  *      0 - 1 if PT @ PT address is valid
170c68a2921SDaniel Kurtz  */
171c68a2921SDaniel Kurtz #define RK_DTE_PT_ADDRESS_MASK    0xfffff000
172c68a2921SDaniel Kurtz #define RK_DTE_PT_VALID           BIT(0)
173c68a2921SDaniel Kurtz 
174c68a2921SDaniel Kurtz static inline phys_addr_t rk_dte_pt_address(u32 dte)
175c68a2921SDaniel Kurtz {
176c68a2921SDaniel Kurtz 	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
177c68a2921SDaniel Kurtz }
178c68a2921SDaniel Kurtz 
179c68a2921SDaniel Kurtz static inline bool rk_dte_is_pt_valid(u32 dte)
180c68a2921SDaniel Kurtz {
181c68a2921SDaniel Kurtz 	return dte & RK_DTE_PT_VALID;
182c68a2921SDaniel Kurtz }
183c68a2921SDaniel Kurtz 
1844f0aba67SShunqian Zheng static inline u32 rk_mk_dte(dma_addr_t pt_dma)
185c68a2921SDaniel Kurtz {
1864f0aba67SShunqian Zheng 	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
187c68a2921SDaniel Kurtz }
188c68a2921SDaniel Kurtz 
189c68a2921SDaniel Kurtz /*
190c68a2921SDaniel Kurtz  * Each PTE has a Page address, some flags and a valid bit:
191c68a2921SDaniel Kurtz  * +---------------------+---+-------+-+
192c68a2921SDaniel Kurtz  * | Page address        |Rsv| Flags |V|
193c68a2921SDaniel Kurtz  * +---------------------+---+-------+-+
194c68a2921SDaniel Kurtz  *  31:12 - Page address (Pages always start on a 4 KB boundary)
195c68a2921SDaniel Kurtz  *  11: 9 - Reserved
196c68a2921SDaniel Kurtz  *   8: 1 - Flags
197c68a2921SDaniel Kurtz  *      8 - Read allocate - allocate cache space on read misses
198c68a2921SDaniel Kurtz  *      7 - Read cache - enable cache & prefetch of data
199c68a2921SDaniel Kurtz  *      6 - Write buffer - enable delaying writes on their way to memory
200c68a2921SDaniel Kurtz  *      5 - Write allocate - allocate cache space on write misses
201c68a2921SDaniel Kurtz  *      4 - Write cache - different writes can be merged together
202c68a2921SDaniel Kurtz  *      3 - Override cache attributes
203c68a2921SDaniel Kurtz  *          if 1, bits 4-8 control cache attributes
204c68a2921SDaniel Kurtz  *          if 0, the system bus defaults are used
205c68a2921SDaniel Kurtz  *      2 - Writable
206c68a2921SDaniel Kurtz  *      1 - Readable
207c68a2921SDaniel Kurtz  *      0 - 1 if Page @ Page address is valid
208c68a2921SDaniel Kurtz  */
209c68a2921SDaniel Kurtz #define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
210c68a2921SDaniel Kurtz #define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
211c68a2921SDaniel Kurtz #define RK_PTE_PAGE_WRITABLE      BIT(2)
212c68a2921SDaniel Kurtz #define RK_PTE_PAGE_READABLE      BIT(1)
213c68a2921SDaniel Kurtz #define RK_PTE_PAGE_VALID         BIT(0)
214c68a2921SDaniel Kurtz 
215c68a2921SDaniel Kurtz static inline phys_addr_t rk_pte_page_address(u32 pte)
216c68a2921SDaniel Kurtz {
217c68a2921SDaniel Kurtz 	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
218c68a2921SDaniel Kurtz }
219c68a2921SDaniel Kurtz 
220c68a2921SDaniel Kurtz static inline bool rk_pte_is_page_valid(u32 pte)
221c68a2921SDaniel Kurtz {
222c68a2921SDaniel Kurtz 	return pte & RK_PTE_PAGE_VALID;
223c68a2921SDaniel Kurtz }
224c68a2921SDaniel Kurtz 
225c68a2921SDaniel Kurtz /* TODO: set cache flags per the IOMMU_CACHE bit in prot */
226c68a2921SDaniel Kurtz static u32 rk_mk_pte(phys_addr_t page, int prot)
227c68a2921SDaniel Kurtz {
228c68a2921SDaniel Kurtz 	u32 flags = 0;
229c68a2921SDaniel Kurtz 	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
230c68a2921SDaniel Kurtz 	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
231c68a2921SDaniel Kurtz 	page &= RK_PTE_PAGE_ADDRESS_MASK;
232c68a2921SDaniel Kurtz 	return page | flags | RK_PTE_PAGE_VALID;
233c68a2921SDaniel Kurtz }
234c68a2921SDaniel Kurtz 
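/*
 * Example with a hypothetical physical page:
 *	rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE)
 *	  == 0x12345000 | RK_PTE_PAGE_READABLE | RK_PTE_PAGE_WRITABLE
 *	     | RK_PTE_PAGE_VALID
 *	  == 0x12345007
 */
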
235c68a2921SDaniel Kurtz static u32 rk_mk_pte_invalid(u32 pte)
236c68a2921SDaniel Kurtz {
237c68a2921SDaniel Kurtz 	return pte & ~RK_PTE_PAGE_VALID;
238c68a2921SDaniel Kurtz }
239c68a2921SDaniel Kurtz 
240c68a2921SDaniel Kurtz /*
241c68a2921SDaniel Kurtz  * rk3288 iova (IOMMU Virtual Address) format
242c68a2921SDaniel Kurtz  *  31       22.21       12.11          0
243c68a2921SDaniel Kurtz  * +-----------+-----------+-------------+
244c68a2921SDaniel Kurtz  * | DTE index | PTE index | Page offset |
245c68a2921SDaniel Kurtz  * +-----------+-----------+-------------+
246c68a2921SDaniel Kurtz  *  31:22 - DTE index   - index of DTE in DT
247c68a2921SDaniel Kurtz  *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
248c68a2921SDaniel Kurtz  *  11: 0 - Page offset - offset into page @ PTE.page_address
249c68a2921SDaniel Kurtz  */
250c68a2921SDaniel Kurtz #define RK_IOVA_DTE_MASK    0xffc00000
251c68a2921SDaniel Kurtz #define RK_IOVA_DTE_SHIFT   22
252c68a2921SDaniel Kurtz #define RK_IOVA_PTE_MASK    0x003ff000
253c68a2921SDaniel Kurtz #define RK_IOVA_PTE_SHIFT   12
254c68a2921SDaniel Kurtz #define RK_IOVA_PAGE_MASK   0x00000fff
255c68a2921SDaniel Kurtz #define RK_IOVA_PAGE_SHIFT  0
256c68a2921SDaniel Kurtz 
257c68a2921SDaniel Kurtz static u32 rk_iova_dte_index(dma_addr_t iova)
258c68a2921SDaniel Kurtz {
259c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
260c68a2921SDaniel Kurtz }
261c68a2921SDaniel Kurtz 
262c68a2921SDaniel Kurtz static u32 rk_iova_pte_index(dma_addr_t iova)
263c68a2921SDaniel Kurtz {
264c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
265c68a2921SDaniel Kurtz }
266c68a2921SDaniel Kurtz 
267c68a2921SDaniel Kurtz static u32 rk_iova_page_offset(dma_addr_t iova)
268c68a2921SDaniel Kurtz {
269c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
270c68a2921SDaniel Kurtz }
271c68a2921SDaniel Kurtz 
272cd6438c5SZhengShunQian static u32 rk_iommu_read(void __iomem *base, u32 offset)
273c68a2921SDaniel Kurtz {
274cd6438c5SZhengShunQian 	return readl(base + offset);
275c68a2921SDaniel Kurtz }
276c68a2921SDaniel Kurtz 
277cd6438c5SZhengShunQian static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
278c68a2921SDaniel Kurtz {
279cd6438c5SZhengShunQian 	writel(value, base + offset);
280c68a2921SDaniel Kurtz }
281c68a2921SDaniel Kurtz 
282c68a2921SDaniel Kurtz static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
283c68a2921SDaniel Kurtz {
284cd6438c5SZhengShunQian 	int i;
285cd6438c5SZhengShunQian 
286cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
287cd6438c5SZhengShunQian 		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
288c68a2921SDaniel Kurtz }
289c68a2921SDaniel Kurtz 
290cd6438c5SZhengShunQian static void rk_iommu_base_command(void __iomem *base, u32 command)
291cd6438c5SZhengShunQian {
292cd6438c5SZhengShunQian 	writel(command, base + RK_MMU_COMMAND);
293cd6438c5SZhengShunQian }
294bf2a5e71STomasz Figa static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
295c68a2921SDaniel Kurtz 			       size_t size)
296c68a2921SDaniel Kurtz {
297cd6438c5SZhengShunQian 	int i;
298bf2a5e71STomasz Figa 	dma_addr_t iova_end = iova_start + size;
299c68a2921SDaniel Kurtz 	/*
300c68a2921SDaniel Kurtz 	 * TODO(djkurtz): Figure out when it is more efficient to shoot down the
301c68a2921SDaniel Kurtz 	 * entire iotlb rather than iterate over individual iovas.
302c68a2921SDaniel Kurtz 	 */
303bf2a5e71STomasz Figa 	for (i = 0; i < iommu->num_mmu; i++) {
304bf2a5e71STomasz Figa 		dma_addr_t iova;
305bf2a5e71STomasz Figa 
306bf2a5e71STomasz Figa 		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
307cd6438c5SZhengShunQian 			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
308c68a2921SDaniel Kurtz 	}
309bf2a5e71STomasz Figa }
310c68a2921SDaniel Kurtz 
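/*
 * Example: zapping a 12 KiB range starting at a hypothetical iova of
 * 0x00100000 issues RK_MMU_ZAP_ONE_LINE writes for 0x00100000, 0x00101000
 * and 0x00102000 on each of the iommu's MMU instances.
 */
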
311c68a2921SDaniel Kurtz static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
312c68a2921SDaniel Kurtz {
313cd6438c5SZhengShunQian 	bool active = true;
314cd6438c5SZhengShunQian 	int i;
315cd6438c5SZhengShunQian 
316cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
317fbedd9b9SJohn Keeping 		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
318fbedd9b9SJohn Keeping 					   RK_MMU_STATUS_STALL_ACTIVE);
319cd6438c5SZhengShunQian 
320cd6438c5SZhengShunQian 	return active;
321c68a2921SDaniel Kurtz }
322c68a2921SDaniel Kurtz 
323c68a2921SDaniel Kurtz static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
324c68a2921SDaniel Kurtz {
325cd6438c5SZhengShunQian 	bool enable = true;
326cd6438c5SZhengShunQian 	int i;
327cd6438c5SZhengShunQian 
328cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
329fbedd9b9SJohn Keeping 		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
330fbedd9b9SJohn Keeping 					   RK_MMU_STATUS_PAGING_ENABLED);
331cd6438c5SZhengShunQian 
332cd6438c5SZhengShunQian 	return enable;
333c68a2921SDaniel Kurtz }
334c68a2921SDaniel Kurtz 
3350416bf64STomasz Figa static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
3360416bf64STomasz Figa {
3370416bf64STomasz Figa 	bool done = true;
3380416bf64STomasz Figa 	int i;
3390416bf64STomasz Figa 
3400416bf64STomasz Figa 	for (i = 0; i < iommu->num_mmu; i++)
3410416bf64STomasz Figa 		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
3420416bf64STomasz Figa 
3430416bf64STomasz Figa 	return done;
3440416bf64STomasz Figa }
3450416bf64STomasz Figa 
346c68a2921SDaniel Kurtz static int rk_iommu_enable_stall(struct rk_iommu *iommu)
347c68a2921SDaniel Kurtz {
348cd6438c5SZhengShunQian 	int ret, i;
3490416bf64STomasz Figa 	bool val;
350c68a2921SDaniel Kurtz 
351c68a2921SDaniel Kurtz 	if (rk_iommu_is_stall_active(iommu))
352c68a2921SDaniel Kurtz 		return 0;
353c68a2921SDaniel Kurtz 
354c68a2921SDaniel Kurtz 	/* Stall can only be enabled if paging is enabled */
355c68a2921SDaniel Kurtz 	if (!rk_iommu_is_paging_enabled(iommu))
356c68a2921SDaniel Kurtz 		return 0;
357c68a2921SDaniel Kurtz 
358c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
359c68a2921SDaniel Kurtz 
3600416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
3610416bf64STomasz Figa 				 val, RK_MMU_POLL_PERIOD_US,
3620416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
363c68a2921SDaniel Kurtz 	if (ret)
364cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
365c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
366cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
367c68a2921SDaniel Kurtz 
368c68a2921SDaniel Kurtz 	return ret;
369c68a2921SDaniel Kurtz }
370c68a2921SDaniel Kurtz 
371c68a2921SDaniel Kurtz static int rk_iommu_disable_stall(struct rk_iommu *iommu)
372c68a2921SDaniel Kurtz {
373cd6438c5SZhengShunQian 	int ret, i;
3740416bf64STomasz Figa 	bool val;
375c68a2921SDaniel Kurtz 
376c68a2921SDaniel Kurtz 	if (!rk_iommu_is_stall_active(iommu))
377c68a2921SDaniel Kurtz 		return 0;
378c68a2921SDaniel Kurtz 
379c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
380c68a2921SDaniel Kurtz 
3810416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
3820416bf64STomasz Figa 				 !val, RK_MMU_POLL_PERIOD_US,
3830416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
384c68a2921SDaniel Kurtz 	if (ret)
385cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
386c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
387cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
388c68a2921SDaniel Kurtz 
389c68a2921SDaniel Kurtz 	return ret;
390c68a2921SDaniel Kurtz }
391c68a2921SDaniel Kurtz 
392c68a2921SDaniel Kurtz static int rk_iommu_enable_paging(struct rk_iommu *iommu)
393c68a2921SDaniel Kurtz {
394cd6438c5SZhengShunQian 	int ret, i;
3950416bf64STomasz Figa 	bool val;
396c68a2921SDaniel Kurtz 
397c68a2921SDaniel Kurtz 	if (rk_iommu_is_paging_enabled(iommu))
398c68a2921SDaniel Kurtz 		return 0;
399c68a2921SDaniel Kurtz 
400c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
401c68a2921SDaniel Kurtz 
4020416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
4030416bf64STomasz Figa 				 val, RK_MMU_POLL_PERIOD_US,
4040416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
405c68a2921SDaniel Kurtz 	if (ret)
406cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
407c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
408cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
409c68a2921SDaniel Kurtz 
410c68a2921SDaniel Kurtz 	return ret;
411c68a2921SDaniel Kurtz }
412c68a2921SDaniel Kurtz 
413c68a2921SDaniel Kurtz static int rk_iommu_disable_paging(struct rk_iommu *iommu)
414c68a2921SDaniel Kurtz {
415cd6438c5SZhengShunQian 	int ret, i;
4160416bf64STomasz Figa 	bool val;
417c68a2921SDaniel Kurtz 
418c68a2921SDaniel Kurtz 	if (!rk_iommu_is_paging_enabled(iommu))
419c68a2921SDaniel Kurtz 		return 0;
420c68a2921SDaniel Kurtz 
421c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
422c68a2921SDaniel Kurtz 
4230416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
4240416bf64STomasz Figa 				 !val, RK_MMU_POLL_PERIOD_US,
4250416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
426c68a2921SDaniel Kurtz 	if (ret)
427cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
428c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
429cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
430c68a2921SDaniel Kurtz 
431c68a2921SDaniel Kurtz 	return ret;
432c68a2921SDaniel Kurtz }
433c68a2921SDaniel Kurtz 
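/*
 * The stall and paging commands above form a small protocol: stalling parks
 * master transactions so that RK_MMU_DTE_ADDR and cache commands can be
 * issued safely.  rk_iommu_enable() below uses the sequence enable_stall ->
 * force_reset -> program DTE_ADDR, zap cache, unmask IRQs -> enable_paging ->
 * disable_stall.
 */
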
434c68a2921SDaniel Kurtz static int rk_iommu_force_reset(struct rk_iommu *iommu)
435c68a2921SDaniel Kurtz {
436cd6438c5SZhengShunQian 	int ret, i;
437c68a2921SDaniel Kurtz 	u32 dte_addr;
4380416bf64STomasz Figa 	bool val;
439c68a2921SDaniel Kurtz 
440c3aa4742SSimon Xue 	if (iommu->reset_disabled)
441c3aa4742SSimon Xue 		return 0;
442c3aa4742SSimon Xue 
443c68a2921SDaniel Kurtz 	/*
444c68a2921SDaniel Kurtz 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
445c68a2921SDaniel Kurtz 	 * and verifying that the upper 5 nybbles are read back.
446c68a2921SDaniel Kurtz 	 */
447cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
448cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
449c68a2921SDaniel Kurtz 
450cd6438c5SZhengShunQian 		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
451c68a2921SDaniel Kurtz 		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
452c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
453c68a2921SDaniel Kurtz 			return -EFAULT;
454c68a2921SDaniel Kurtz 		}
455cd6438c5SZhengShunQian 	}
456c68a2921SDaniel Kurtz 
457c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
458c68a2921SDaniel Kurtz 
4590416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
4600416bf64STomasz Figa 				 val, RK_MMU_POLL_PERIOD_US,
4610416bf64STomasz Figa 				 RK_MMU_FORCE_RESET_TIMEOUT_US);
462cd6438c5SZhengShunQian 	if (ret) {
463c68a2921SDaniel Kurtz 		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
464c68a2921SDaniel Kurtz 		return ret;
465c68a2921SDaniel Kurtz 	}
466c68a2921SDaniel Kurtz 
467cd6438c5SZhengShunQian 	return 0;
468cd6438c5SZhengShunQian }
469cd6438c5SZhengShunQian 
470cd6438c5SZhengShunQian static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
471c68a2921SDaniel Kurtz {
472cd6438c5SZhengShunQian 	void __iomem *base = iommu->bases[index];
473c68a2921SDaniel Kurtz 	u32 dte_index, pte_index, page_offset;
474c68a2921SDaniel Kurtz 	u32 mmu_dte_addr;
475c68a2921SDaniel Kurtz 	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
476c68a2921SDaniel Kurtz 	u32 *dte_addr;
477c68a2921SDaniel Kurtz 	u32 dte;
478c68a2921SDaniel Kurtz 	phys_addr_t pte_addr_phys = 0;
479c68a2921SDaniel Kurtz 	u32 *pte_addr = NULL;
480c68a2921SDaniel Kurtz 	u32 pte = 0;
481c68a2921SDaniel Kurtz 	phys_addr_t page_addr_phys = 0;
482c68a2921SDaniel Kurtz 	u32 page_flags = 0;
483c68a2921SDaniel Kurtz 
484c68a2921SDaniel Kurtz 	dte_index = rk_iova_dte_index(iova);
485c68a2921SDaniel Kurtz 	pte_index = rk_iova_pte_index(iova);
486c68a2921SDaniel Kurtz 	page_offset = rk_iova_page_offset(iova);
487c68a2921SDaniel Kurtz 
488cd6438c5SZhengShunQian 	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
489c68a2921SDaniel Kurtz 	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
490c68a2921SDaniel Kurtz 
491c68a2921SDaniel Kurtz 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
492c68a2921SDaniel Kurtz 	dte_addr = phys_to_virt(dte_addr_phys);
493c68a2921SDaniel Kurtz 	dte = *dte_addr;
494c68a2921SDaniel Kurtz 
495c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte))
496c68a2921SDaniel Kurtz 		goto print_it;
497c68a2921SDaniel Kurtz 
498c68a2921SDaniel Kurtz 	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
499c68a2921SDaniel Kurtz 	pte_addr = phys_to_virt(pte_addr_phys);
500c68a2921SDaniel Kurtz 	pte = *pte_addr;
501c68a2921SDaniel Kurtz 
502c68a2921SDaniel Kurtz 	if (!rk_pte_is_page_valid(pte))
503c68a2921SDaniel Kurtz 		goto print_it;
504c68a2921SDaniel Kurtz 
505c68a2921SDaniel Kurtz 	page_addr_phys = rk_pte_page_address(pte) + page_offset;
506c68a2921SDaniel Kurtz 	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
507c68a2921SDaniel Kurtz 
508c68a2921SDaniel Kurtz print_it:
509c68a2921SDaniel Kurtz 	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
510c68a2921SDaniel Kurtz 		&iova, dte_index, pte_index, page_offset);
511c68a2921SDaniel Kurtz 	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
512c68a2921SDaniel Kurtz 		&mmu_dte_addr_phys, &dte_addr_phys, dte,
513c68a2921SDaniel Kurtz 		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
514c68a2921SDaniel Kurtz 		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
515c68a2921SDaniel Kurtz }
516c68a2921SDaniel Kurtz 
517c68a2921SDaniel Kurtz static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
518c68a2921SDaniel Kurtz {
519c68a2921SDaniel Kurtz 	struct rk_iommu *iommu = dev_id;
520c68a2921SDaniel Kurtz 	u32 status;
521c68a2921SDaniel Kurtz 	u32 int_status;
522c68a2921SDaniel Kurtz 	dma_addr_t iova;
523cd6438c5SZhengShunQian 	irqreturn_t ret = IRQ_NONE;
524cd6438c5SZhengShunQian 	int i;
525c68a2921SDaniel Kurtz 
5260f181d3cSJeffy Chen 	if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
5270f181d3cSJeffy Chen 		return IRQ_NONE;
5280f181d3cSJeffy Chen 
5290f181d3cSJeffy Chen 	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
5300f181d3cSJeffy Chen 		goto out;
531f2e3a5f5STomasz Figa 
532cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
533cd6438c5SZhengShunQian 		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
534c68a2921SDaniel Kurtz 		if (int_status == 0)
535cd6438c5SZhengShunQian 			continue;
536c68a2921SDaniel Kurtz 
537cd6438c5SZhengShunQian 		ret = IRQ_HANDLED;
538cd6438c5SZhengShunQian 		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
539c68a2921SDaniel Kurtz 
540c68a2921SDaniel Kurtz 		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
541c68a2921SDaniel Kurtz 			int flags;
542c68a2921SDaniel Kurtz 
543cd6438c5SZhengShunQian 			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
544c68a2921SDaniel Kurtz 			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
545c68a2921SDaniel Kurtz 					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
546c68a2921SDaniel Kurtz 
547c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
548c68a2921SDaniel Kurtz 				&iova,
549c68a2921SDaniel Kurtz 				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
550c68a2921SDaniel Kurtz 
551cd6438c5SZhengShunQian 			log_iova(iommu, i, iova);
552c68a2921SDaniel Kurtz 
553c68a2921SDaniel Kurtz 			/*
554c68a2921SDaniel Kurtz 			 * Report page fault to any installed handlers.
555c68a2921SDaniel Kurtz 			 * Ignore the return code, though, since we always zap cache
556c68a2921SDaniel Kurtz 			 * and clear the page fault anyway.
557c68a2921SDaniel Kurtz 			 */
558c68a2921SDaniel Kurtz 			if (iommu->domain)
559c68a2921SDaniel Kurtz 				report_iommu_fault(iommu->domain, iommu->dev, iova,
560c68a2921SDaniel Kurtz 						   flags);
561c68a2921SDaniel Kurtz 			else
562c68a2921SDaniel Kurtz 				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
563c68a2921SDaniel Kurtz 
564cd6438c5SZhengShunQian 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
565cd6438c5SZhengShunQian 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
566c68a2921SDaniel Kurtz 		}
567c68a2921SDaniel Kurtz 
568c68a2921SDaniel Kurtz 		if (int_status & RK_MMU_IRQ_BUS_ERROR)
569c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
570c68a2921SDaniel Kurtz 
571c68a2921SDaniel Kurtz 		if (int_status & ~RK_MMU_IRQ_MASK)
572c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
573c68a2921SDaniel Kurtz 				int_status);
574c68a2921SDaniel Kurtz 
575cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
576cd6438c5SZhengShunQian 	}
577c68a2921SDaniel Kurtz 
578f2e3a5f5STomasz Figa 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
579f2e3a5f5STomasz Figa 
5800f181d3cSJeffy Chen out:
5810f181d3cSJeffy Chen 	pm_runtime_put(iommu->dev);
582cd6438c5SZhengShunQian 	return ret;
583c68a2921SDaniel Kurtz }
584c68a2921SDaniel Kurtz 
585c68a2921SDaniel Kurtz static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
586c68a2921SDaniel Kurtz 					 dma_addr_t iova)
587c68a2921SDaniel Kurtz {
588bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
589c68a2921SDaniel Kurtz 	unsigned long flags;
590c68a2921SDaniel Kurtz 	phys_addr_t pt_phys, phys = 0;
591c68a2921SDaniel Kurtz 	u32 dte, pte;
592c68a2921SDaniel Kurtz 	u32 *page_table;
593c68a2921SDaniel Kurtz 
594c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
595c68a2921SDaniel Kurtz 
596c68a2921SDaniel Kurtz 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
597c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte))
598c68a2921SDaniel Kurtz 		goto out;
599c68a2921SDaniel Kurtz 
600c68a2921SDaniel Kurtz 	pt_phys = rk_dte_pt_address(dte);
601c68a2921SDaniel Kurtz 	page_table = (u32 *)phys_to_virt(pt_phys);
602c68a2921SDaniel Kurtz 	pte = page_table[rk_iova_pte_index(iova)];
603c68a2921SDaniel Kurtz 	if (!rk_pte_is_page_valid(pte))
604c68a2921SDaniel Kurtz 		goto out;
605c68a2921SDaniel Kurtz 
606c68a2921SDaniel Kurtz 	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
607c68a2921SDaniel Kurtz out:
608c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
609c68a2921SDaniel Kurtz 
610c68a2921SDaniel Kurtz 	return phys;
611c68a2921SDaniel Kurtz }
612c68a2921SDaniel Kurtz 
613c68a2921SDaniel Kurtz static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
614c68a2921SDaniel Kurtz 			      dma_addr_t iova, size_t size)
615c68a2921SDaniel Kurtz {
616c68a2921SDaniel Kurtz 	struct list_head *pos;
617c68a2921SDaniel Kurtz 	unsigned long flags;
618c68a2921SDaniel Kurtz 
619c68a2921SDaniel Kurtz 	/* shoot down this iova range from all iommus using this domain */
620c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
621c68a2921SDaniel Kurtz 	list_for_each(pos, &rk_domain->iommus) {
622c68a2921SDaniel Kurtz 		struct rk_iommu *iommu;
6230f181d3cSJeffy Chen 
624c68a2921SDaniel Kurtz 		iommu = list_entry(pos, struct rk_iommu, node);
6250f181d3cSJeffy Chen 
6260f181d3cSJeffy Chen 		/* Only zap TLBs of IOMMUs that are powered on. */
6270f181d3cSJeffy Chen 		if (pm_runtime_get_if_in_use(iommu->dev)) {
6280f181d3cSJeffy Chen 			WARN_ON(clk_bulk_enable(iommu->num_clocks,
6290f181d3cSJeffy Chen 						iommu->clocks));
630c68a2921SDaniel Kurtz 			rk_iommu_zap_lines(iommu, iova, size);
631f2e3a5f5STomasz Figa 			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
6320f181d3cSJeffy Chen 			pm_runtime_put(iommu->dev);
6330f181d3cSJeffy Chen 		}
634c68a2921SDaniel Kurtz 	}
635c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
636c68a2921SDaniel Kurtz }
637c68a2921SDaniel Kurtz 
638d4dd920cSTomasz Figa static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
639d4dd920cSTomasz Figa 					 dma_addr_t iova, size_t size)
640d4dd920cSTomasz Figa {
641d4dd920cSTomasz Figa 	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
642d4dd920cSTomasz Figa 	if (size > SPAGE_SIZE)
643d4dd920cSTomasz Figa 		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
644d4dd920cSTomasz Figa 					SPAGE_SIZE);
645d4dd920cSTomasz Figa }
646d4dd920cSTomasz Figa 
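/*
 * Example: after mapping 16 KiB at a hypothetical iova of 0x00100000, the
 * function above zaps only 0x00100000 and 0x00103000, the two pages whose
 * dte/pte cachelines could be shared with a pre-existing mapping.
 */
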
647c68a2921SDaniel Kurtz static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
648c68a2921SDaniel Kurtz 				  dma_addr_t iova)
649c68a2921SDaniel Kurtz {
650c68a2921SDaniel Kurtz 	u32 *page_table, *dte_addr;
6514f0aba67SShunqian Zheng 	u32 dte_index, dte;
652c68a2921SDaniel Kurtz 	phys_addr_t pt_phys;
6534f0aba67SShunqian Zheng 	dma_addr_t pt_dma;
654c68a2921SDaniel Kurtz 
655c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
656c68a2921SDaniel Kurtz 
6574f0aba67SShunqian Zheng 	dte_index = rk_iova_dte_index(iova);
6584f0aba67SShunqian Zheng 	dte_addr = &rk_domain->dt[dte_index];
659c68a2921SDaniel Kurtz 	dte = *dte_addr;
660c68a2921SDaniel Kurtz 	if (rk_dte_is_pt_valid(dte))
661c68a2921SDaniel Kurtz 		goto done;
662c68a2921SDaniel Kurtz 
663c68a2921SDaniel Kurtz 	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
664c68a2921SDaniel Kurtz 	if (!page_table)
665c68a2921SDaniel Kurtz 		return ERR_PTR(-ENOMEM);
666c68a2921SDaniel Kurtz 
6679176a303SJeffy Chen 	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
6689176a303SJeffy Chen 	if (dma_mapping_error(dma_dev, pt_dma)) {
6699176a303SJeffy Chen 		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
6704f0aba67SShunqian Zheng 		free_page((unsigned long)page_table);
6714f0aba67SShunqian Zheng 		return ERR_PTR(-ENOMEM);
6724f0aba67SShunqian Zheng 	}
6734f0aba67SShunqian Zheng 
6744f0aba67SShunqian Zheng 	dte = rk_mk_dte(pt_dma);
675c68a2921SDaniel Kurtz 	*dte_addr = dte;
676c68a2921SDaniel Kurtz 
6774f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
6784f0aba67SShunqian Zheng 	rk_table_flush(rk_domain,
6794f0aba67SShunqian Zheng 		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
680c68a2921SDaniel Kurtz done:
681c68a2921SDaniel Kurtz 	pt_phys = rk_dte_pt_address(dte);
682c68a2921SDaniel Kurtz 	return (u32 *)phys_to_virt(pt_phys);
683c68a2921SDaniel Kurtz }
684c68a2921SDaniel Kurtz 
685c68a2921SDaniel Kurtz static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
6864f0aba67SShunqian Zheng 				  u32 *pte_addr, dma_addr_t pte_dma,
6874f0aba67SShunqian Zheng 				  size_t size)
688c68a2921SDaniel Kurtz {
689c68a2921SDaniel Kurtz 	unsigned int pte_count;
690c68a2921SDaniel Kurtz 	unsigned int pte_total = size / SPAGE_SIZE;
691c68a2921SDaniel Kurtz 
692c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
693c68a2921SDaniel Kurtz 
694c68a2921SDaniel Kurtz 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
695c68a2921SDaniel Kurtz 		u32 pte = pte_addr[pte_count];
696c68a2921SDaniel Kurtz 		if (!rk_pte_is_page_valid(pte))
697c68a2921SDaniel Kurtz 			break;
698c68a2921SDaniel Kurtz 
699c68a2921SDaniel Kurtz 		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
700c68a2921SDaniel Kurtz 	}
701c68a2921SDaniel Kurtz 
7024f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pte_dma, pte_count);
703c68a2921SDaniel Kurtz 
704c68a2921SDaniel Kurtz 	return pte_count * SPAGE_SIZE;
705c68a2921SDaniel Kurtz }
706c68a2921SDaniel Kurtz 
707c68a2921SDaniel Kurtz static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
7084f0aba67SShunqian Zheng 			     dma_addr_t pte_dma, dma_addr_t iova,
7094f0aba67SShunqian Zheng 			     phys_addr_t paddr, size_t size, int prot)
710c68a2921SDaniel Kurtz {
711c68a2921SDaniel Kurtz 	unsigned int pte_count;
712c68a2921SDaniel Kurtz 	unsigned int pte_total = size / SPAGE_SIZE;
713c68a2921SDaniel Kurtz 	phys_addr_t page_phys;
714c68a2921SDaniel Kurtz 
715c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
716c68a2921SDaniel Kurtz 
717c68a2921SDaniel Kurtz 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
718c68a2921SDaniel Kurtz 		u32 pte = pte_addr[pte_count];
719c68a2921SDaniel Kurtz 
720c68a2921SDaniel Kurtz 		if (rk_pte_is_page_valid(pte))
721c68a2921SDaniel Kurtz 			goto unwind;
722c68a2921SDaniel Kurtz 
723c68a2921SDaniel Kurtz 		pte_addr[pte_count] = rk_mk_pte(paddr, prot);
724c68a2921SDaniel Kurtz 
725c68a2921SDaniel Kurtz 		paddr += SPAGE_SIZE;
726c68a2921SDaniel Kurtz 	}
727c68a2921SDaniel Kurtz 
7284f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pte_dma, pte_total);
729c68a2921SDaniel Kurtz 
730d4dd920cSTomasz Figa 	/*
731d4dd920cSTomasz Figa 	 * Zap the first and last iova to evict from iotlb any previously
732d4dd920cSTomasz Figa 	 * mapped cachelines holding stale values for their dte and pte.
733d4dd920cSTomasz Figa 	 * We only zap the first and last iova, since only they could have
734d4dd920cSTomasz Figa 	 * dte or pte shared with an existing mapping.
735d4dd920cSTomasz Figa 	 */
736d4dd920cSTomasz Figa 	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
737d4dd920cSTomasz Figa 
738c68a2921SDaniel Kurtz 	return 0;
739c68a2921SDaniel Kurtz unwind:
740c68a2921SDaniel Kurtz 	/* Unmap the range of iovas that we just mapped */
7414f0aba67SShunqian Zheng 	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
7424f0aba67SShunqian Zheng 			    pte_count * SPAGE_SIZE);
743c68a2921SDaniel Kurtz 
744c68a2921SDaniel Kurtz 	iova += pte_count * SPAGE_SIZE;
745c68a2921SDaniel Kurtz 	page_phys = rk_pte_page_address(pte_addr[pte_count]);
746c68a2921SDaniel Kurtz 	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
747c68a2921SDaniel Kurtz 	       &iova, &page_phys, &paddr, prot);
748c68a2921SDaniel Kurtz 
749c68a2921SDaniel Kurtz 	return -EADDRINUSE;
750c68a2921SDaniel Kurtz }
751c68a2921SDaniel Kurtz 
752c68a2921SDaniel Kurtz static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
753c68a2921SDaniel Kurtz 			phys_addr_t paddr, size_t size, int prot)
754c68a2921SDaniel Kurtz {
755bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
756c68a2921SDaniel Kurtz 	unsigned long flags;
7574f0aba67SShunqian Zheng 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
758c68a2921SDaniel Kurtz 	u32 *page_table, *pte_addr;
7594f0aba67SShunqian Zheng 	u32 dte, pte_index;
760c68a2921SDaniel Kurtz 	int ret;
761c68a2921SDaniel Kurtz 
762c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
763c68a2921SDaniel Kurtz 
764c68a2921SDaniel Kurtz 	/*
765c68a2921SDaniel Kurtz 	 * pgsize_bitmap specifies iova sizes that fit in one page table
766c68a2921SDaniel Kurtz 	 * (1024 4-KiB pages = 4 MiB).
767c68a2921SDaniel Kurtz 	 * So, size will always satisfy 4096 <= size <= 4194304.
768c68a2921SDaniel Kurtz 	 * Since iommu_map() guarantees that both iova and size will be
769c68a2921SDaniel Kurtz 	 * aligned, we will always only be mapping from a single dte here.
770c68a2921SDaniel Kurtz 	 */
771c68a2921SDaniel Kurtz 	page_table = rk_dte_get_page_table(rk_domain, iova);
772c68a2921SDaniel Kurtz 	if (IS_ERR(page_table)) {
773c68a2921SDaniel Kurtz 		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
774c68a2921SDaniel Kurtz 		return PTR_ERR(page_table);
775c68a2921SDaniel Kurtz 	}
776c68a2921SDaniel Kurtz 
7774f0aba67SShunqian Zheng 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
7784f0aba67SShunqian Zheng 	pte_index = rk_iova_pte_index(iova);
7794f0aba67SShunqian Zheng 	pte_addr = &page_table[pte_index];
7804f0aba67SShunqian Zheng 	pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
7814f0aba67SShunqian Zheng 	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
7824f0aba67SShunqian Zheng 				paddr, size, prot);
7834f0aba67SShunqian Zheng 
784c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
785c68a2921SDaniel Kurtz 
786c68a2921SDaniel Kurtz 	return ret;
787c68a2921SDaniel Kurtz }
788c68a2921SDaniel Kurtz 
789c68a2921SDaniel Kurtz static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
790c68a2921SDaniel Kurtz 			     size_t size)
791c68a2921SDaniel Kurtz {
792bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
793c68a2921SDaniel Kurtz 	unsigned long flags;
7944f0aba67SShunqian Zheng 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
795c68a2921SDaniel Kurtz 	phys_addr_t pt_phys;
796c68a2921SDaniel Kurtz 	u32 dte;
797c68a2921SDaniel Kurtz 	u32 *pte_addr;
798c68a2921SDaniel Kurtz 	size_t unmap_size;
799c68a2921SDaniel Kurtz 
800c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
801c68a2921SDaniel Kurtz 
802c68a2921SDaniel Kurtz 	/*
803c68a2921SDaniel Kurtz 	 * pgsize_bitmap specifies iova sizes that fit in one page table
804c68a2921SDaniel Kurtz 	 * (1024 4-KiB pages = 4 MiB).
805c68a2921SDaniel Kurtz 	 * So, size will always satisfy 4096 <= size <= 4194304.
806c68a2921SDaniel Kurtz 	 * Since iommu_unmap() guarantees that both iova and size will be
807c68a2921SDaniel Kurtz 	 * aligned, we will always only be unmapping from a single dte here.
808c68a2921SDaniel Kurtz 	 */
809c68a2921SDaniel Kurtz 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
810c68a2921SDaniel Kurtz 	/* Just return 0 if iova is unmapped */
811c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte)) {
812c68a2921SDaniel Kurtz 		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
813c68a2921SDaniel Kurtz 		return 0;
814c68a2921SDaniel Kurtz 	}
815c68a2921SDaniel Kurtz 
816c68a2921SDaniel Kurtz 	pt_phys = rk_dte_pt_address(dte);
817c68a2921SDaniel Kurtz 	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
8184f0aba67SShunqian Zheng 	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
8194f0aba67SShunqian Zheng 	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
820c68a2921SDaniel Kurtz 
821c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
822c68a2921SDaniel Kurtz 
823c68a2921SDaniel Kurtz 	/* Shootdown iotlb entries for iova range that was just unmapped */
824c68a2921SDaniel Kurtz 	rk_iommu_zap_iova(rk_domain, iova, unmap_size);
825c68a2921SDaniel Kurtz 
826c68a2921SDaniel Kurtz 	return unmap_size;
827c68a2921SDaniel Kurtz }
828c68a2921SDaniel Kurtz 
829c68a2921SDaniel Kurtz static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
830c68a2921SDaniel Kurtz {
8315fd577c3SJeffy Chen 	struct rk_iommudata *data = dev->archdata.iommu;
832c68a2921SDaniel Kurtz 
8335fd577c3SJeffy Chen 	return data ? data->iommu : NULL;
834c68a2921SDaniel Kurtz }
835c68a2921SDaniel Kurtz 
8360f181d3cSJeffy Chen /* Must be called with iommu powered on and attached */
8370f181d3cSJeffy Chen static void rk_iommu_disable(struct rk_iommu *iommu)
838c68a2921SDaniel Kurtz {
8390f181d3cSJeffy Chen 	int i;
840c68a2921SDaniel Kurtz 
8410f181d3cSJeffy Chen 	/* Ignore errors while disabling, just keep going */
8420f181d3cSJeffy Chen 	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
8430f181d3cSJeffy Chen 	rk_iommu_enable_stall(iommu);
8440f181d3cSJeffy Chen 	rk_iommu_disable_paging(iommu);
8450f181d3cSJeffy Chen 	for (i = 0; i < iommu->num_mmu; i++) {
8460f181d3cSJeffy Chen 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
8470f181d3cSJeffy Chen 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
8480f181d3cSJeffy Chen 	}
8490f181d3cSJeffy Chen 	rk_iommu_disable_stall(iommu);
8500f181d3cSJeffy Chen 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
8510f181d3cSJeffy Chen }
8520f181d3cSJeffy Chen 
8530f181d3cSJeffy Chen /* Must be called with iommu powered on and attached */
8540f181d3cSJeffy Chen static int rk_iommu_enable(struct rk_iommu *iommu)
8550f181d3cSJeffy Chen {
8560f181d3cSJeffy Chen 	struct iommu_domain *domain = iommu->domain;
8570f181d3cSJeffy Chen 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
8580f181d3cSJeffy Chen 	int ret, i;
859c68a2921SDaniel Kurtz 
860f2e3a5f5STomasz Figa 	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
861c68a2921SDaniel Kurtz 	if (ret)
862c68a2921SDaniel Kurtz 		return ret;
863c68a2921SDaniel Kurtz 
864f2e3a5f5STomasz Figa 	ret = rk_iommu_enable_stall(iommu);
865f2e3a5f5STomasz Figa 	if (ret)
866f2e3a5f5STomasz Figa 		goto out_disable_clocks;
867f2e3a5f5STomasz Figa 
868c68a2921SDaniel Kurtz 	ret = rk_iommu_force_reset(iommu);
869c68a2921SDaniel Kurtz 	if (ret)
870f6717d72STomasz Figa 		goto out_disable_stall;
871c68a2921SDaniel Kurtz 
872cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
8734f0aba67SShunqian Zheng 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
8744f0aba67SShunqian Zheng 			       rk_domain->dt_dma);
875ae8a7910SJohn Keeping 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
876cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
877cd6438c5SZhengShunQian 	}
878c68a2921SDaniel Kurtz 
879c68a2921SDaniel Kurtz 	ret = rk_iommu_enable_paging(iommu);
880c68a2921SDaniel Kurtz 
881f6717d72STomasz Figa out_disable_stall:
882c68a2921SDaniel Kurtz 	rk_iommu_disable_stall(iommu);
883f2e3a5f5STomasz Figa out_disable_clocks:
884f2e3a5f5STomasz Figa 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
885f6717d72STomasz Figa 	return ret;
886c68a2921SDaniel Kurtz }
887c68a2921SDaniel Kurtz 
888c68a2921SDaniel Kurtz static void rk_iommu_detach_device(struct iommu_domain *domain,
889c68a2921SDaniel Kurtz 				   struct device *dev)
890c68a2921SDaniel Kurtz {
891c68a2921SDaniel Kurtz 	struct rk_iommu *iommu;
892bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
893c68a2921SDaniel Kurtz 	unsigned long flags;
894c68a2921SDaniel Kurtz 
895c68a2921SDaniel Kurtz 	/* Allow 'virtual devices' (e.g. drm) to detach from the domain */
896c68a2921SDaniel Kurtz 	iommu = rk_iommu_from_dev(dev);
897c68a2921SDaniel Kurtz 	if (!iommu)
898c68a2921SDaniel Kurtz 		return;
899c68a2921SDaniel Kurtz 
9000f181d3cSJeffy Chen 	dev_dbg(dev, "Detaching from iommu domain\n");
9010f181d3cSJeffy Chen 
9020f181d3cSJeffy Chen 	/* iommu already detached */
9030f181d3cSJeffy Chen 	if (iommu->domain != domain)
9040f181d3cSJeffy Chen 		return;
9050f181d3cSJeffy Chen 
9060f181d3cSJeffy Chen 	iommu->domain = NULL;
9070f181d3cSJeffy Chen 
908c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
909c68a2921SDaniel Kurtz 	list_del_init(&iommu->node);
910c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
911c68a2921SDaniel Kurtz 
9120f181d3cSJeffy Chen 	if (pm_runtime_get_if_in_use(iommu->dev)) {
9130f181d3cSJeffy Chen 		rk_iommu_disable(iommu);
9140f181d3cSJeffy Chen 		pm_runtime_put(iommu->dev);
915cd6438c5SZhengShunQian 	}
9160f181d3cSJeffy Chen }
917c68a2921SDaniel Kurtz 
9180f181d3cSJeffy Chen static int rk_iommu_attach_device(struct iommu_domain *domain,
9190f181d3cSJeffy Chen 		struct device *dev)
9200f181d3cSJeffy Chen {
9210f181d3cSJeffy Chen 	struct rk_iommu *iommu;
9220f181d3cSJeffy Chen 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
9230f181d3cSJeffy Chen 	unsigned long flags;
9240f181d3cSJeffy Chen 	int ret;
925c68a2921SDaniel Kurtz 
9260f181d3cSJeffy Chen 	/*
9270f181d3cSJeffy Chen 	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
9280f181d3cSJeffy Chen 	 * Such a device does not belong to an iommu group.
9290f181d3cSJeffy Chen 	 */
9300f181d3cSJeffy Chen 	iommu = rk_iommu_from_dev(dev);
9310f181d3cSJeffy Chen 	if (!iommu)
9320f181d3cSJeffy Chen 		return 0;
9330f181d3cSJeffy Chen 
9340f181d3cSJeffy Chen 	dev_dbg(dev, "Attaching to iommu domain\n");
9350f181d3cSJeffy Chen 
9360f181d3cSJeffy Chen 	/* iommu already attached */
9370f181d3cSJeffy Chen 	if (iommu->domain == domain)
9380f181d3cSJeffy Chen 		return 0;
9390f181d3cSJeffy Chen 
9400f181d3cSJeffy Chen 	if (iommu->domain)
9410f181d3cSJeffy Chen 		rk_iommu_detach_device(iommu->domain, dev);
9420f181d3cSJeffy Chen 
9430f181d3cSJeffy Chen 	iommu->domain = domain;
9440f181d3cSJeffy Chen 
9450f181d3cSJeffy Chen 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
9460f181d3cSJeffy Chen 	list_add_tail(&iommu->node, &rk_domain->iommus);
9470f181d3cSJeffy Chen 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
9480f181d3cSJeffy Chen 
9490f181d3cSJeffy Chen 	if (!pm_runtime_get_if_in_use(iommu->dev))
9500f181d3cSJeffy Chen 		return 0;
9510f181d3cSJeffy Chen 
9520f181d3cSJeffy Chen 	ret = rk_iommu_enable(iommu);
9530f181d3cSJeffy Chen 	if (ret)
9540f181d3cSJeffy Chen 		rk_iommu_detach_device(iommu->domain, dev);
9550f181d3cSJeffy Chen 
9560f181d3cSJeffy Chen 	pm_runtime_put(iommu->dev);
9570f181d3cSJeffy Chen 
9580f181d3cSJeffy Chen 	return ret;
959c68a2921SDaniel Kurtz }
960c68a2921SDaniel Kurtz 
961bcd516a3SJoerg Roedel static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
962c68a2921SDaniel Kurtz {
963c68a2921SDaniel Kurtz 	struct rk_iommu_domain *rk_domain;
964c68a2921SDaniel Kurtz 
965a93db2f2SShunqian Zheng 	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
966bcd516a3SJoerg Roedel 		return NULL;
967bcd516a3SJoerg Roedel 
9689176a303SJeffy Chen 	if (!dma_dev)
969bcd516a3SJoerg Roedel 		return NULL;
970c68a2921SDaniel Kurtz 
9719176a303SJeffy Chen 	rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
9724f0aba67SShunqian Zheng 	if (!rk_domain)
9739176a303SJeffy Chen 		return NULL;
9744f0aba67SShunqian Zheng 
975a93db2f2SShunqian Zheng 	if (type == IOMMU_DOMAIN_DMA &&
976a93db2f2SShunqian Zheng 	    iommu_get_dma_cookie(&rk_domain->domain))
9779176a303SJeffy Chen 		return NULL;
9784f0aba67SShunqian Zheng 
979c68a2921SDaniel Kurtz 	 * rk32xx iommus use a two-level page table.
980c68a2921SDaniel Kurtz 	 * Each level-1 (dt) and level-2 (pt) table has 1024 4-byte entries.
981c68a2921SDaniel Kurtz 	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
982c68a2921SDaniel Kurtz 	 * Allocate one 4 KiB page for each table.
983c68a2921SDaniel Kurtz 	 */
984c68a2921SDaniel Kurtz 	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
985c68a2921SDaniel Kurtz 	if (!rk_domain->dt)
9864f0aba67SShunqian Zheng 		goto err_put_cookie;
987c68a2921SDaniel Kurtz 
9889176a303SJeffy Chen 	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
9894f0aba67SShunqian Zheng 					   SPAGE_SIZE, DMA_TO_DEVICE);
9909176a303SJeffy Chen 	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
9919176a303SJeffy Chen 		dev_err(dma_dev, "DMA map error for DT\n");
9924f0aba67SShunqian Zheng 		goto err_free_dt;
9934f0aba67SShunqian Zheng 	}
9944f0aba67SShunqian Zheng 
9954f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
996c68a2921SDaniel Kurtz 
997c68a2921SDaniel Kurtz 	spin_lock_init(&rk_domain->iommus_lock);
998c68a2921SDaniel Kurtz 	spin_lock_init(&rk_domain->dt_lock);
999c68a2921SDaniel Kurtz 	INIT_LIST_HEAD(&rk_domain->iommus);
1000c68a2921SDaniel Kurtz 
1001a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.aperture_start = 0;
1002a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
1003a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.force_aperture = true;
1004a93db2f2SShunqian Zheng 
1005bcd516a3SJoerg Roedel 	return &rk_domain->domain;
1006c68a2921SDaniel Kurtz 
10074f0aba67SShunqian Zheng err_free_dt:
10084f0aba67SShunqian Zheng 	free_page((unsigned long)rk_domain->dt);
10094f0aba67SShunqian Zheng err_put_cookie:
1010a93db2f2SShunqian Zheng 	if (type == IOMMU_DOMAIN_DMA)
10114f0aba67SShunqian Zheng 		iommu_put_dma_cookie(&rk_domain->domain);
10124f0aba67SShunqian Zheng 
1013bcd516a3SJoerg Roedel 	return NULL;
1014c68a2921SDaniel Kurtz }
1015c68a2921SDaniel Kurtz 
1016bcd516a3SJoerg Roedel static void rk_iommu_domain_free(struct iommu_domain *domain)
1017c68a2921SDaniel Kurtz {
1018bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1019c68a2921SDaniel Kurtz 	int i;
1020c68a2921SDaniel Kurtz 
1021c68a2921SDaniel Kurtz 	WARN_ON(!list_empty(&rk_domain->iommus));
1022c68a2921SDaniel Kurtz 
1023c68a2921SDaniel Kurtz 	for (i = 0; i < NUM_DT_ENTRIES; i++) {
1024c68a2921SDaniel Kurtz 		u32 dte = rk_domain->dt[i];
1025c68a2921SDaniel Kurtz 		if (rk_dte_is_pt_valid(dte)) {
1026c68a2921SDaniel Kurtz 			phys_addr_t pt_phys = rk_dte_pt_address(dte);
1027c68a2921SDaniel Kurtz 			u32 *page_table = phys_to_virt(pt_phys);
10289176a303SJeffy Chen 			dma_unmap_single(dma_dev, pt_phys,
10294f0aba67SShunqian Zheng 					 SPAGE_SIZE, DMA_TO_DEVICE);
1030c68a2921SDaniel Kurtz 			free_page((unsigned long)page_table);
1031c68a2921SDaniel Kurtz 		}
1032c68a2921SDaniel Kurtz 	}
1033c68a2921SDaniel Kurtz 
10349176a303SJeffy Chen 	dma_unmap_single(dma_dev, rk_domain->dt_dma,
10354f0aba67SShunqian Zheng 			 SPAGE_SIZE, DMA_TO_DEVICE);
1036c68a2921SDaniel Kurtz 	free_page((unsigned long)rk_domain->dt);
10374f0aba67SShunqian Zheng 
1038a93db2f2SShunqian Zheng 	if (domain->type == IOMMU_DOMAIN_DMA)
10394f0aba67SShunqian Zheng 		iommu_put_dma_cookie(&rk_domain->domain);
1040c68a2921SDaniel Kurtz }
1041c68a2921SDaniel Kurtz 
1042c68a2921SDaniel Kurtz static int rk_iommu_add_device(struct device *dev)
1043c68a2921SDaniel Kurtz {
1044c68a2921SDaniel Kurtz 	struct iommu_group *group;
1045c9d9f239SJoerg Roedel 	struct rk_iommu *iommu;
10460f181d3cSJeffy Chen 	struct rk_iommudata *data;
10470f181d3cSJeffy Chen 
10480f181d3cSJeffy Chen 	data = dev->archdata.iommu;
10490f181d3cSJeffy Chen 	if (!data)
10500f181d3cSJeffy Chen 		return -ENODEV;
1051c68a2921SDaniel Kurtz 
1052c9d9f239SJoerg Roedel 	iommu = rk_iommu_from_dev(dev);
10535fd577c3SJeffy Chen 
10545fd577c3SJeffy Chen 	group = iommu_group_get_for_dev(dev);
10555fd577c3SJeffy Chen 	if (IS_ERR(group))
10565fd577c3SJeffy Chen 		return PTR_ERR(group);
10575fd577c3SJeffy Chen 	iommu_group_put(group);
10585fd577c3SJeffy Chen 
1059c9d9f239SJoerg Roedel 	iommu_device_link(&iommu->iommu, dev);
10600f181d3cSJeffy Chen 	data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);
1061c9d9f239SJoerg Roedel 
1062c68a2921SDaniel Kurtz 	return 0;
1063c68a2921SDaniel Kurtz }
1064c68a2921SDaniel Kurtz 
1065c68a2921SDaniel Kurtz static void rk_iommu_remove_device(struct device *dev)
1066c68a2921SDaniel Kurtz {
1067c9d9f239SJoerg Roedel 	struct rk_iommu *iommu;
10680f181d3cSJeffy Chen 	struct rk_iommudata *data = dev->archdata.iommu;
1069c9d9f239SJoerg Roedel 
1070c9d9f239SJoerg Roedel 	iommu = rk_iommu_from_dev(dev);
1071c9d9f239SJoerg Roedel 
10720f181d3cSJeffy Chen 	device_link_del(data->link);
10735fd577c3SJeffy Chen 	iommu_device_unlink(&iommu->iommu, dev);
1074c68a2921SDaniel Kurtz 	iommu_group_remove_device(dev);
1075c68a2921SDaniel Kurtz }
1076c68a2921SDaniel Kurtz 
107757c26957SJeffy Chen static struct iommu_group *rk_iommu_device_group(struct device *dev)
107857c26957SJeffy Chen {
107957c26957SJeffy Chen 	struct rk_iommu *iommu;
108057c26957SJeffy Chen 
108157c26957SJeffy Chen 	iommu = rk_iommu_from_dev(dev);
108257c26957SJeffy Chen 
108357c26957SJeffy Chen 	return iommu_group_ref_get(iommu->group);
108457c26957SJeffy Chen }
108557c26957SJeffy Chen 
10865fd577c3SJeffy Chen static int rk_iommu_of_xlate(struct device *dev,
10875fd577c3SJeffy Chen 			     struct of_phandle_args *args)
10885fd577c3SJeffy Chen {
10895fd577c3SJeffy Chen 	struct platform_device *iommu_dev;
10905fd577c3SJeffy Chen 	struct rk_iommudata *data;
10915fd577c3SJeffy Chen 
10925fd577c3SJeffy Chen 	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
10935fd577c3SJeffy Chen 	if (!data)
10945fd577c3SJeffy Chen 		return -ENOMEM;
10955fd577c3SJeffy Chen 
10965fd577c3SJeffy Chen 	iommu_dev = of_find_device_by_node(args->np);
10975fd577c3SJeffy Chen 
10985fd577c3SJeffy Chen 	data->iommu = platform_get_drvdata(iommu_dev);
10995fd577c3SJeffy Chen 	dev->archdata.iommu = data;
11005fd577c3SJeffy Chen 
1101*40fa84e1SArnd Bergmann 	platform_device_put(iommu_dev);
11025fd577c3SJeffy Chen 
11035fd577c3SJeffy Chen 	return 0;
11045fd577c3SJeffy Chen }
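/*
 * Example usage (a minimal sketch; the node names and addresses below are
 * hypothetical): a master device references its IOMMU through an "iommus"
 * phandle, and the OF core then invokes rk_iommu_of_xlate() for each such
 * phandle while configuring the master device:
 *
 *	vopl_mmu: iommu@ff8f3f00 {
 *		compatible = "rockchip,iommu";
 *		reg = <0x0 0xff8f3f00 0x0 0x100>;
 *		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 *		#iommu-cells = <0>;
 *	};
 *
 *	&vopl {
 *		iommus = <&vopl_mmu>;
 *	};
 */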
11055fd577c3SJeffy Chen 
1106c68a2921SDaniel Kurtz static const struct iommu_ops rk_iommu_ops = {
1107bcd516a3SJoerg Roedel 	.domain_alloc = rk_iommu_domain_alloc,
1108bcd516a3SJoerg Roedel 	.domain_free = rk_iommu_domain_free,
1109c68a2921SDaniel Kurtz 	.attach_dev = rk_iommu_attach_device,
1110c68a2921SDaniel Kurtz 	.detach_dev = rk_iommu_detach_device,
1111c68a2921SDaniel Kurtz 	.map = rk_iommu_map,
1112c68a2921SDaniel Kurtz 	.unmap = rk_iommu_unmap,
1113e6d0f473SSimon Xue 	.map_sg = default_iommu_map_sg,
1114c68a2921SDaniel Kurtz 	.add_device = rk_iommu_add_device,
1115c68a2921SDaniel Kurtz 	.remove_device = rk_iommu_remove_device,
1116c68a2921SDaniel Kurtz 	.iova_to_phys = rk_iommu_iova_to_phys,
111757c26957SJeffy Chen 	.device_group = rk_iommu_device_group,
1118c68a2921SDaniel Kurtz 	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
11195fd577c3SJeffy Chen 	.of_xlate = rk_iommu_of_xlate,
1120c68a2921SDaniel Kurtz };
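
/*
 * A minimal sketch (hypothetical consumer code, not part of this driver) of
 * how the ops table above is exercised through the generic IOMMU API; "dev",
 * "iova" and "paddr" stand in for caller-chosen values:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (domain && !iommu_attach_device(domain, dev)) {
 *		iommu_map(domain, iova, paddr, SPAGE_SIZE,
 *			  IOMMU_READ | IOMMU_WRITE);
 *		iommu_unmap(domain, iova, SPAGE_SIZE);
 *		iommu_detach_device(domain, dev);
 *	}
 *	if (domain)
 *		iommu_domain_free(domain);
 */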
1121c68a2921SDaniel Kurtz 
1122c68a2921SDaniel Kurtz static int rk_iommu_probe(struct platform_device *pdev)
1123c68a2921SDaniel Kurtz {
1124c68a2921SDaniel Kurtz 	struct device *dev = &pdev->dev;
1125c68a2921SDaniel Kurtz 	struct rk_iommu *iommu;
1126c68a2921SDaniel Kurtz 	struct resource *res;
11273d08f434SShunqian Zheng 	int num_res = pdev->num_resources;
1128d0b912bdSJeffy Chen 	int err, i, irq;
1129c68a2921SDaniel Kurtz 
1130c68a2921SDaniel Kurtz 	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
1131c68a2921SDaniel Kurtz 	if (!iommu)
1132c68a2921SDaniel Kurtz 		return -ENOMEM;
1133c68a2921SDaniel Kurtz 
1134c68a2921SDaniel Kurtz 	platform_set_drvdata(pdev, iommu);
1135c68a2921SDaniel Kurtz 	iommu->dev = dev;
1136cd6438c5SZhengShunQian 	iommu->num_mmu = 0;
11373d08f434SShunqian Zheng 
11383d08f434SShunqian Zheng 	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
1139cd6438c5SZhengShunQian 				    GFP_KERNEL);
1140cd6438c5SZhengShunQian 	if (!iommu->bases)
1141cd6438c5SZhengShunQian 		return -ENOMEM;
1142c68a2921SDaniel Kurtz 
11433d08f434SShunqian Zheng 	for (i = 0; i < num_res; i++) {
1144cd6438c5SZhengShunQian 		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
11458d7f2d84STomeu Vizoso 		if (!res)
11468d7f2d84STomeu Vizoso 			continue;
1147cd6438c5SZhengShunQian 		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
1148cd6438c5SZhengShunQian 		if (IS_ERR(iommu->bases[i]))
1149cd6438c5SZhengShunQian 			continue;
1150cd6438c5SZhengShunQian 		iommu->num_mmu++;
1151cd6438c5SZhengShunQian 	}
1152cd6438c5SZhengShunQian 	if (iommu->num_mmu == 0)
1153cd6438c5SZhengShunQian 		return iommu->bases[0] ? PTR_ERR(iommu->bases[0]) : -ENODEV;
1154c68a2921SDaniel Kurtz 
1155d0b912bdSJeffy Chen 	i = 0;
1156d0b912bdSJeffy Chen 	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
1157d0b912bdSJeffy Chen 		if (irq < 0)
1158d0b912bdSJeffy Chen 			return irq;
115903f732f8SSimon Xue 
1160d0b912bdSJeffy Chen 		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
1161d0b912bdSJeffy Chen 				       IRQF_SHARED, dev_name(dev), iommu);
1162d0b912bdSJeffy Chen 		if (err)
1163d0b912bdSJeffy Chen 			return err;
1164c68a2921SDaniel Kurtz 	}
1165c68a2921SDaniel Kurtz 
1166c3aa4742SSimon Xue 	iommu->reset_disabled = device_property_read_bool(dev,
1167c3aa4742SSimon Xue 					"rockchip,disable-mmu-reset");
1168c68a2921SDaniel Kurtz 
1169f2e3a5f5STomasz Figa 	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
1170f2e3a5f5STomasz Figa 	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
1171f2e3a5f5STomasz Figa 				     sizeof(*iommu->clocks), GFP_KERNEL);
1172f2e3a5f5STomasz Figa 	if (!iommu->clocks)
1173f2e3a5f5STomasz Figa 		return -ENOMEM;
1174f2e3a5f5STomasz Figa 
1175f2e3a5f5STomasz Figa 	for (i = 0; i < iommu->num_clocks; ++i)
1176f2e3a5f5STomasz Figa 		iommu->clocks[i].id = rk_iommu_clocks[i];
1177f2e3a5f5STomasz Figa 
11782f8c7f2eSHeiko Stuebner 	/*
11792f8c7f2eSHeiko Stuebner 	 * IOMMU clocks should be present for all new devices and devicetrees,
11802f8c7f2eSHeiko Stuebner 	 * but there are older devicetrees without clocks out in the wild.
11812f8c7f2eSHeiko Stuebner 	 * So treat clocks as optional for the time being.
11822f8c7f2eSHeiko Stuebner 	 */
1183f2e3a5f5STomasz Figa 	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
11842f8c7f2eSHeiko Stuebner 	if (err == -ENOENT)
11852f8c7f2eSHeiko Stuebner 		iommu->num_clocks = 0;
11862f8c7f2eSHeiko Stuebner 	else if (err)
1187c9d9f239SJoerg Roedel 		return err;
1188c9d9f239SJoerg Roedel 
1189f2e3a5f5STomasz Figa 	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
1190f2e3a5f5STomasz Figa 	if (err)
1191f2e3a5f5STomasz Figa 		return err;
1192f2e3a5f5STomasz Figa 
119357c26957SJeffy Chen 	iommu->group = iommu_group_alloc();
119457c26957SJeffy Chen 	if (IS_ERR(iommu->group)) {
119557c26957SJeffy Chen 		err = PTR_ERR(iommu->group);
119657c26957SJeffy Chen 		goto err_unprepare_clocks;
119757c26957SJeffy Chen 	}
119857c26957SJeffy Chen 
1199f2e3a5f5STomasz Figa 	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
1200f2e3a5f5STomasz Figa 	if (err)
120157c26957SJeffy Chen 		goto err_put_group;
1202f2e3a5f5STomasz Figa 
1203c9d9f239SJoerg Roedel 	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
12045fd577c3SJeffy Chen 	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
12055fd577c3SJeffy Chen 
1206c9d9f239SJoerg Roedel 	err = iommu_device_register(&iommu->iommu);
12076d9ffaadSJeffy Chen 	if (err)
1208f2e3a5f5STomasz Figa 		goto err_remove_sysfs;
1209c9d9f239SJoerg Roedel 
12109176a303SJeffy Chen 	/*
12119176a303SJeffy Chen 	 * Use the first registered IOMMU device for the domain to use with
12129176a303SJeffy Chen 	 * the DMA API, since a domain might not physically correspond to a
12139176a303SJeffy Chen 	 * single IOMMU device; see the sketch below.
12149176a303SJeffy Chen 	 */
12159176a303SJeffy Chen 	if (!dma_dev)
12169176a303SJeffy Chen 		dma_dev = &pdev->dev;
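
	/*
	 * A minimal sketch (mirroring what rk_iommu_domain_alloc() does
	 * earlier in this file) of why dma_dev matters: directory and page
	 * tables are mapped for device access through this one struct device,
	 * whichever IOMMU the domain is later attached to:
	 *
	 *	u32 *dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	 *	dma_addr_t dt_dma = dma_map_single(dma_dev, dt, SPAGE_SIZE,
	 *					   DMA_TO_DEVICE);
	 *	if (dma_mapping_error(dma_dev, dt_dma))
	 *		free_page((unsigned long)dt);
	 */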
12179176a303SJeffy Chen 
12184d88a8a4SJeffy Chen 	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
12194d88a8a4SJeffy Chen 
12200f181d3cSJeffy Chen 	pm_runtime_enable(dev);
12210f181d3cSJeffy Chen 
1222f2e3a5f5STomasz Figa 	return 0;
1223f2e3a5f5STomasz Figa err_remove_sysfs:
1224f2e3a5f5STomasz Figa 	iommu_device_sysfs_remove(&iommu->iommu);
122557c26957SJeffy Chen err_put_group:
122657c26957SJeffy Chen 	iommu_group_put(iommu->group);
1227f2e3a5f5STomasz Figa err_unprepare_clocks:
1228f2e3a5f5STomasz Figa 	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
1229c9d9f239SJoerg Roedel 	return err;
1230c68a2921SDaniel Kurtz }
1231c68a2921SDaniel Kurtz 
12321a4e90f2SMarc Zyngier static void rk_iommu_shutdown(struct platform_device *pdev)
12331a4e90f2SMarc Zyngier {
12340f181d3cSJeffy Chen 	pm_runtime_force_suspend(&pdev->dev);
12350f181d3cSJeffy Chen }
12361a4e90f2SMarc Zyngier 
12370f181d3cSJeffy Chen static int __maybe_unused rk_iommu_suspend(struct device *dev)
12380f181d3cSJeffy Chen {
12390f181d3cSJeffy Chen 	struct rk_iommu *iommu = dev_get_drvdata(dev);
12400f181d3cSJeffy Chen 
12410f181d3cSJeffy Chen 	if (!iommu->domain)
12420f181d3cSJeffy Chen 		return 0;
12430f181d3cSJeffy Chen 
12440f181d3cSJeffy Chen 	rk_iommu_disable(iommu);
12450f181d3cSJeffy Chen 	return 0;
12461a4e90f2SMarc Zyngier }
12470f181d3cSJeffy Chen 
12480f181d3cSJeffy Chen static int __maybe_unused rk_iommu_resume(struct device *dev)
12490f181d3cSJeffy Chen {
12500f181d3cSJeffy Chen 	struct rk_iommu *iommu = dev_get_drvdata(dev);
12510f181d3cSJeffy Chen 
12520f181d3cSJeffy Chen 	if (!iommu->domain)
12530f181d3cSJeffy Chen 		return 0;
12540f181d3cSJeffy Chen 
12550f181d3cSJeffy Chen 	return rk_iommu_enable(iommu);
12561a4e90f2SMarc Zyngier }
12571a4e90f2SMarc Zyngier 
12580f181d3cSJeffy Chen static const struct dev_pm_ops rk_iommu_pm_ops = {
12590f181d3cSJeffy Chen 	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
12600f181d3cSJeffy Chen 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
12610f181d3cSJeffy Chen 				pm_runtime_force_resume)
12620f181d3cSJeffy Chen };
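
/*
 * A minimal sketch (hypothetical call flow; "vop_dev" stands for any master
 * device) of how the runtime-PM ops above are reached: the DL_FLAG_PM_RUNTIME
 * device link created in rk_iommu_add_device() makes the IOMMU a supplier of
 * its master, so the master's runtime transitions drag the IOMMU along:
 *
 *	pm_runtime_get_sync(vop_dev);	// resumes the IOMMU first, ending
 *					// up in rk_iommu_resume()
 *	...				// master performs translated DMA
 *	pm_runtime_put(vop_dev);	// idles the IOMMU afterwards, via
 *					// rk_iommu_suspend()
 */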
12630f181d3cSJeffy Chen 
1264c68a2921SDaniel Kurtz static const struct of_device_id rk_iommu_dt_ids[] = {
1265c68a2921SDaniel Kurtz 	{ .compatible = "rockchip,iommu" },
1266c68a2921SDaniel Kurtz 	{ /* sentinel */ }
1267c68a2921SDaniel Kurtz };
1268c68a2921SDaniel Kurtz MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
1269c68a2921SDaniel Kurtz 
1270c68a2921SDaniel Kurtz static struct platform_driver rk_iommu_driver = {
1271c68a2921SDaniel Kurtz 	.probe = rk_iommu_probe,
12721a4e90f2SMarc Zyngier 	.shutdown = rk_iommu_shutdown,
1273c68a2921SDaniel Kurtz 	.driver = {
1274c68a2921SDaniel Kurtz 		   .name = "rk_iommu",
1275d9e7eb15SArnd Bergmann 		   .of_match_table = rk_iommu_dt_ids,
12760f181d3cSJeffy Chen 		   .pm = &rk_iommu_pm_ops,
127798b72b94SJeffy Chen 		   .suppress_bind_attrs = true,
1278c68a2921SDaniel Kurtz 	},
1279c68a2921SDaniel Kurtz };
1280c68a2921SDaniel Kurtz 
1281c68a2921SDaniel Kurtz static int __init rk_iommu_init(void)
1282c68a2921SDaniel Kurtz {
12839176a303SJeffy Chen 	return platform_driver_register(&rk_iommu_driver);
1284c68a2921SDaniel Kurtz }
1285c68a2921SDaniel Kurtz subsys_initcall(rk_iommu_init);
1286c68a2921SDaniel Kurtz 
12875fd577c3SJeffy Chen IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu");
12885fd577c3SJeffy Chen 
1289c68a2921SDaniel Kurtz MODULE_DESCRIPTION("IOMMU API for Rockchip");
1290c68a2921SDaniel Kurtz MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
1291c68a2921SDaniel Kurtz MODULE_ALIAS("platform:rockchip-iommu");
1292c68a2921SDaniel Kurtz MODULE_LICENSE("GPL v2");
1293