xref: /linux/drivers/iommu/rockchip-iommu.c (revision 4d88a8a4c345cd16f634df855148cfb2a59a204a)
1c68a2921SDaniel Kurtz /*
2c68a2921SDaniel Kurtz  * This program is free software; you can redistribute it and/or modify
3c68a2921SDaniel Kurtz  * it under the terms of the GNU General Public License version 2 as
4c68a2921SDaniel Kurtz  * published by the Free Software Foundation.
5c68a2921SDaniel Kurtz  */
6c68a2921SDaniel Kurtz 
7f2e3a5f5STomasz Figa #include <linux/clk.h>
8c68a2921SDaniel Kurtz #include <linux/compiler.h>
9c68a2921SDaniel Kurtz #include <linux/delay.h>
10c68a2921SDaniel Kurtz #include <linux/device.h>
114f0aba67SShunqian Zheng #include <linux/dma-iommu.h>
12461a6946SJoerg Roedel #include <linux/dma-mapping.h>
13c68a2921SDaniel Kurtz #include <linux/errno.h>
14c68a2921SDaniel Kurtz #include <linux/interrupt.h>
15c68a2921SDaniel Kurtz #include <linux/io.h>
16c68a2921SDaniel Kurtz #include <linux/iommu.h>
170416bf64STomasz Figa #include <linux/iopoll.h>
18c68a2921SDaniel Kurtz #include <linux/list.h>
19c68a2921SDaniel Kurtz #include <linux/mm.h>
20c68a2921SDaniel Kurtz #include <linux/module.h>
21c68a2921SDaniel Kurtz #include <linux/of.h>
225fd577c3SJeffy Chen #include <linux/of_iommu.h>
23c68a2921SDaniel Kurtz #include <linux/of_platform.h>
24c68a2921SDaniel Kurtz #include <linux/platform_device.h>
25c68a2921SDaniel Kurtz #include <linux/slab.h>
26c68a2921SDaniel Kurtz #include <linux/spinlock.h>
27c68a2921SDaniel Kurtz 
28c68a2921SDaniel Kurtz /* MMU register offsets */
29c68a2921SDaniel Kurtz #define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
30c68a2921SDaniel Kurtz #define RK_MMU_STATUS		0x04
31c68a2921SDaniel Kurtz #define RK_MMU_COMMAND		0x08
32c68a2921SDaniel Kurtz #define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
33c68a2921SDaniel Kurtz #define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
34c68a2921SDaniel Kurtz #define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
35c68a2921SDaniel Kurtz #define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
36c68a2921SDaniel Kurtz #define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
37c68a2921SDaniel Kurtz #define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
38c68a2921SDaniel Kurtz #define RK_MMU_AUTO_GATING	0x24
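
/*
 * For illustration only: reading the masked IRQ status and acknowledging it
 * would look like the sketch below, where "base" is a hypothetical
 * ioremapped MMU instance (the driver wraps such accesses in
 * rk_iommu_read()/rk_iommu_write() further down):
 *
 *	u32 status = readl(base + RK_MMU_INT_STATUS);
 *	writel(status, base + RK_MMU_INT_CLEAR);
 */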
39c68a2921SDaniel Kurtz 
40c68a2921SDaniel Kurtz #define DTE_ADDR_DUMMY		0xCAFEBABE
410416bf64STomasz Figa 
420416bf64STomasz Figa #define RK_MMU_POLL_PERIOD_US		100
430416bf64STomasz Figa #define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
440416bf64STomasz Figa #define RK_MMU_POLL_TIMEOUT_US		1000
45c68a2921SDaniel Kurtz 
46c68a2921SDaniel Kurtz /* RK_MMU_STATUS fields */
47c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
48c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
49c68a2921SDaniel Kurtz #define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
50c68a2921SDaniel Kurtz #define RK_MMU_STATUS_IDLE                 BIT(3)
51c68a2921SDaniel Kurtz #define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
52c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
53c68a2921SDaniel Kurtz #define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)
54c68a2921SDaniel Kurtz 
55c68a2921SDaniel Kurtz /* RK_MMU_COMMAND command values */
56c68a2921SDaniel Kurtz #define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
57c68a2921SDaniel Kurtz #define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
58c68a2921SDaniel Kurtz #define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
59c68a2921SDaniel Kurtz #define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling; re-enables paging */
60c68a2921SDaniel Kurtz #define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
61c68a2921SDaniel Kurtz #define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
62c68a2921SDaniel Kurtz #define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */
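
/*
 * For illustration, the page-fault path in rk_iommu_irq() below effectively
 * issues this two-command sequence per faulting MMU, "base" being that MMU's
 * registers:
 *
 *	writel(RK_MMU_CMD_ZAP_CACHE, base + RK_MMU_COMMAND);
 *	writel(RK_MMU_CMD_PAGE_FAULT_DONE, base + RK_MMU_COMMAND);
 */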
63c68a2921SDaniel Kurtz 
64c68a2921SDaniel Kurtz /* RK_MMU_INT_* register fields */
65c68a2921SDaniel Kurtz #define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
66c68a2921SDaniel Kurtz #define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
67c68a2921SDaniel Kurtz #define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
68c68a2921SDaniel Kurtz 
69c68a2921SDaniel Kurtz #define NUM_DT_ENTRIES 1024
70c68a2921SDaniel Kurtz #define NUM_PT_ENTRIES 1024
71c68a2921SDaniel Kurtz 
72c68a2921SDaniel Kurtz #define SPAGE_ORDER 12
73c68a2921SDaniel Kurtz #define SPAGE_SIZE (1 << SPAGE_ORDER)
74c68a2921SDaniel Kurtz 
75c68a2921SDaniel Kurtz /*
76c68a2921SDaniel Kurtz  * Support mapping any size that fits in one page table:
77c68a2921SDaniel Kurtz  *   4 KiB to 4 MiB
78c68a2921SDaniel Kurtz  */
79c68a2921SDaniel Kurtz #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
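
/*
 * That is, bits 12..22 are set: the bitmap allows every power-of-two size
 * from 4 KiB (BIT(12)) up to 4 MiB (BIT(22)), each of which stays within a
 * single 1024-entry page table.
 */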
80c68a2921SDaniel Kurtz 
81c68a2921SDaniel Kurtz struct rk_iommu_domain {
82c68a2921SDaniel Kurtz 	struct list_head iommus;
83c68a2921SDaniel Kurtz 	u32 *dt; /* page directory table */
844f0aba67SShunqian Zheng 	dma_addr_t dt_dma;
85c68a2921SDaniel Kurtz 	spinlock_t iommus_lock; /* lock for iommus list */
86c68a2921SDaniel Kurtz 	spinlock_t dt_lock; /* lock for modifying page directory table */
87bcd516a3SJoerg Roedel 
88bcd516a3SJoerg Roedel 	struct iommu_domain domain;
89c68a2921SDaniel Kurtz };
90c68a2921SDaniel Kurtz 
91f2e3a5f5STomasz Figa /* list of clocks required by IOMMU */
92f2e3a5f5STomasz Figa static const char * const rk_iommu_clocks[] = {
93f2e3a5f5STomasz Figa 	"aclk", "iface",
94f2e3a5f5STomasz Figa };
95f2e3a5f5STomasz Figa 
96c68a2921SDaniel Kurtz struct rk_iommu {
97c68a2921SDaniel Kurtz 	struct device *dev;
98cd6438c5SZhengShunQian 	void __iomem **bases;
99cd6438c5SZhengShunQian 	int num_mmu;
100f2e3a5f5STomasz Figa 	struct clk_bulk_data *clocks;
101f2e3a5f5STomasz Figa 	int num_clocks;
102c3aa4742SSimon Xue 	bool reset_disabled;
103c9d9f239SJoerg Roedel 	struct iommu_device iommu;
104c68a2921SDaniel Kurtz 	struct list_head node; /* entry in rk_iommu_domain.iommus */
105c68a2921SDaniel Kurtz 	struct iommu_domain *domain; /* domain to which iommu is attached */
106c68a2921SDaniel Kurtz };
107c68a2921SDaniel Kurtz 
1085fd577c3SJeffy Chen struct rk_iommudata {
1095fd577c3SJeffy Chen 	struct rk_iommu *iommu;
1105fd577c3SJeffy Chen };
1115fd577c3SJeffy Chen 
1129176a303SJeffy Chen static struct device *dma_dev;
1139176a303SJeffy Chen 
1144f0aba67SShunqian Zheng static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
1154f0aba67SShunqian Zheng 				  unsigned int count)
116c68a2921SDaniel Kurtz {
1174f0aba67SShunqian Zheng 	size_t size = count * sizeof(u32); /* count is the number of u32 entries */
118c68a2921SDaniel Kurtz 
1199176a303SJeffy Chen 	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
120c68a2921SDaniel Kurtz }
121c68a2921SDaniel Kurtz 
122bcd516a3SJoerg Roedel static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
123bcd516a3SJoerg Roedel {
124bcd516a3SJoerg Roedel 	return container_of(dom, struct rk_iommu_domain, domain);
125bcd516a3SJoerg Roedel }
126bcd516a3SJoerg Roedel 
127c68a2921SDaniel Kurtz /*
128c68a2921SDaniel Kurtz  * The Rockchip rk3288 iommu uses a 2-level page table.
129c68a2921SDaniel Kurtz  * The first level is the "Directory Table" (DT).
130c68a2921SDaniel Kurtz  * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
131c68a2921SDaniel Kurtz  * to a "Page Table".
132c68a2921SDaniel Kurtz  * The second level consists of up to 1024 Page Tables (PTs).
133c68a2921SDaniel Kurtz  * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
134c68a2921SDaniel Kurtz  * a 4 KB page of physical memory.
135c68a2921SDaniel Kurtz  *
136c68a2921SDaniel Kurtz  * The DT and each PT each fit in a single 4 KB page (4 bytes * 1024 entries).
137c68a2921SDaniel Kurtz  * Each iommu device has a MMU_DTE_ADDR register that contains the physical
138c68a2921SDaniel Kurtz  * address of the start of the DT page.
139c68a2921SDaniel Kurtz  *
140c68a2921SDaniel Kurtz  * The structure of the page table is as follows:
141c68a2921SDaniel Kurtz  *
142c68a2921SDaniel Kurtz  *                   DT
143c68a2921SDaniel Kurtz  * MMU_DTE_ADDR -> +-----+
144c68a2921SDaniel Kurtz  *                 |     |
145c68a2921SDaniel Kurtz  *                 +-----+     PT
146c68a2921SDaniel Kurtz  *                 | DTE | -> +-----+
147c68a2921SDaniel Kurtz  *                 +-----+    |     |     Memory
148c68a2921SDaniel Kurtz  *                 |     |    +-----+     Page
149c68a2921SDaniel Kurtz  *                 |     |    | PTE | -> +-----+
150c68a2921SDaniel Kurtz  *                 +-----+    +-----+    |     |
151c68a2921SDaniel Kurtz  *                            |     |    |     |
152c68a2921SDaniel Kurtz  *                            |     |    |     |
153c68a2921SDaniel Kurtz  *                            +-----+    |     |
154c68a2921SDaniel Kurtz  *                                       |     |
155c68a2921SDaniel Kurtz  *                                       |     |
156c68a2921SDaniel Kurtz  *                                       +-----+
157c68a2921SDaniel Kurtz  */
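
/*
 * In pseudocode, resolving an iova is two dependent table lookups plus an
 * offset (rk_iommu_iova_to_phys() below is the software equivalent):
 *
 *	dte  = dt[rk_iova_dte_index(iova)];
 *	pte  = pt_of(dte)[rk_iova_pte_index(iova)];
 *	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
 *
 * where pt_of() is shorthand for following the DTE's PT address; the real
 * helpers are defined further down.
 */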
158c68a2921SDaniel Kurtz 
159c68a2921SDaniel Kurtz /*
160c68a2921SDaniel Kurtz  * Each DTE has a PT address and a valid bit:
161c68a2921SDaniel Kurtz  * +---------------------+-----------+-+
162c68a2921SDaniel Kurtz  * | PT address          | Reserved  |V|
163c68a2921SDaniel Kurtz  * +---------------------+-----------+-+
164c68a2921SDaniel Kurtz  *  31:12 - PT address (PTs always start on a 4 KB boundary)
165c68a2921SDaniel Kurtz  *  11: 1 - Reserved
166c68a2921SDaniel Kurtz  *      0 - 1 if PT @ PT address is valid
167c68a2921SDaniel Kurtz  */
168c68a2921SDaniel Kurtz #define RK_DTE_PT_ADDRESS_MASK    0xfffff000
169c68a2921SDaniel Kurtz #define RK_DTE_PT_VALID           BIT(0)
170c68a2921SDaniel Kurtz 
171c68a2921SDaniel Kurtz static inline phys_addr_t rk_dte_pt_address(u32 dte)
172c68a2921SDaniel Kurtz {
173c68a2921SDaniel Kurtz 	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
174c68a2921SDaniel Kurtz }
175c68a2921SDaniel Kurtz 
176c68a2921SDaniel Kurtz static inline bool rk_dte_is_pt_valid(u32 dte)
177c68a2921SDaniel Kurtz {
178c68a2921SDaniel Kurtz 	return dte & RK_DTE_PT_VALID;
179c68a2921SDaniel Kurtz }
180c68a2921SDaniel Kurtz 
1814f0aba67SShunqian Zheng static inline u32 rk_mk_dte(dma_addr_t pt_dma)
182c68a2921SDaniel Kurtz {
1834f0aba67SShunqian Zheng 	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
184c68a2921SDaniel Kurtz }
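
/*
 * Worked example: for a hypothetical page table at DMA address 0x01234000,
 * rk_mk_dte(0x01234000) yields 0x01234001 (address bits plus the valid bit);
 * rk_dte_is_pt_valid(0x01234001) is true and rk_dte_pt_address(0x01234001)
 * recovers 0x01234000.
 */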
185c68a2921SDaniel Kurtz 
186c68a2921SDaniel Kurtz /*
187c68a2921SDaniel Kurtz  * Each PTE has a Page address, some flags and a valid bit:
188c68a2921SDaniel Kurtz  * +---------------------+---+-------+-+
189c68a2921SDaniel Kurtz  * | Page address        |Rsv| Flags |V|
190c68a2921SDaniel Kurtz  * +---------------------+---+-------+-+
191c68a2921SDaniel Kurtz  *  31:12 - Page address (Pages always start on a 4 KB boundary)
192c68a2921SDaniel Kurtz  *  11: 9 - Reserved
193c68a2921SDaniel Kurtz  *   8: 1 - Flags
194c68a2921SDaniel Kurtz  *      8 - Read allocate - allocate cache space on read misses
195c68a2921SDaniel Kurtz  *      7 - Read cache - enable cache & prefetch of data
196c68a2921SDaniel Kurtz  *      6 - Write buffer - enable delaying writes on their way to memory
197c68a2921SDaniel Kurtz  *      5 - Write allocate - allocate cache space on write misses
198c68a2921SDaniel Kurtz  *      4 - Write cache - different writes can be merged together
199c68a2921SDaniel Kurtz  *      3 - Override cache attributes
200c68a2921SDaniel Kurtz  *          if 1, bits 4-8 control cache attributes
201c68a2921SDaniel Kurtz  *          if 0, the system bus defaults are used
202c68a2921SDaniel Kurtz  *      2 - Writable
203c68a2921SDaniel Kurtz  *      1 - Readable
204c68a2921SDaniel Kurtz  *      0 - 1 if Page @ Page address is valid
205c68a2921SDaniel Kurtz  */
206c68a2921SDaniel Kurtz #define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
207c68a2921SDaniel Kurtz #define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
208c68a2921SDaniel Kurtz #define RK_PTE_PAGE_WRITABLE      BIT(2)
209c68a2921SDaniel Kurtz #define RK_PTE_PAGE_READABLE      BIT(1)
210c68a2921SDaniel Kurtz #define RK_PTE_PAGE_VALID         BIT(0)
211c68a2921SDaniel Kurtz 
212c68a2921SDaniel Kurtz static inline phys_addr_t rk_pte_page_address(u32 pte)
213c68a2921SDaniel Kurtz {
214c68a2921SDaniel Kurtz 	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
215c68a2921SDaniel Kurtz }
216c68a2921SDaniel Kurtz 
217c68a2921SDaniel Kurtz static inline bool rk_pte_is_page_valid(u32 pte)
218c68a2921SDaniel Kurtz {
219c68a2921SDaniel Kurtz 	return pte & RK_PTE_PAGE_VALID;
220c68a2921SDaniel Kurtz }
221c68a2921SDaniel Kurtz 
222c68a2921SDaniel Kurtz /* TODO: set cache flags per prot IOMMU_CACHE */
223c68a2921SDaniel Kurtz static u32 rk_mk_pte(phys_addr_t page, int prot)
224c68a2921SDaniel Kurtz {
225c68a2921SDaniel Kurtz 	u32 flags = 0;
226c68a2921SDaniel Kurtz 	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
227c68a2921SDaniel Kurtz 	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
228c68a2921SDaniel Kurtz 	page &= RK_PTE_PAGE_ADDRESS_MASK;
229c68a2921SDaniel Kurtz 	return page | flags | RK_PTE_PAGE_VALID;
230c68a2921SDaniel Kurtz }
231c68a2921SDaniel Kurtz 
232c68a2921SDaniel Kurtz static u32 rk_mk_pte_invalid(u32 pte)
233c68a2921SDaniel Kurtz {
234c68a2921SDaniel Kurtz 	return pte & ~RK_PTE_PAGE_VALID;
235c68a2921SDaniel Kurtz }
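
/*
 * Worked example: for a hypothetical page at physical address 0x40000000,
 * rk_mk_pte(0x40000000, IOMMU_READ | IOMMU_WRITE) yields 0x40000007
 * (address | writable | readable | valid), and rk_mk_pte_invalid(0x40000007)
 * yields 0x40000006 - only the valid bit is cleared, the stale address and
 * flag bits are simply ignored afterwards.
 */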
236c68a2921SDaniel Kurtz 
237c68a2921SDaniel Kurtz /*
238c68a2921SDaniel Kurtz  * rk3288 iova (IOMMU Virtual Address) format
239c68a2921SDaniel Kurtz  *  31       22.21       12.11          0
240c68a2921SDaniel Kurtz  * +-----------+-----------+-------------+
241c68a2921SDaniel Kurtz  * | DTE index | PTE index | Page offset |
242c68a2921SDaniel Kurtz  * +-----------+-----------+-------------+
243c68a2921SDaniel Kurtz  *  31:22 - DTE index   - index of DTE in DT
244c68a2921SDaniel Kurtz  *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
245c68a2921SDaniel Kurtz  *  11: 0 - Page offset - offset into page @ PTE.page_address
246c68a2921SDaniel Kurtz  */
247c68a2921SDaniel Kurtz #define RK_IOVA_DTE_MASK    0xffc00000
248c68a2921SDaniel Kurtz #define RK_IOVA_DTE_SHIFT   22
249c68a2921SDaniel Kurtz #define RK_IOVA_PTE_MASK    0x003ff000
250c68a2921SDaniel Kurtz #define RK_IOVA_PTE_SHIFT   12
251c68a2921SDaniel Kurtz #define RK_IOVA_PAGE_MASK   0x00000fff
252c68a2921SDaniel Kurtz #define RK_IOVA_PAGE_SHIFT  0
253c68a2921SDaniel Kurtz 
254c68a2921SDaniel Kurtz static u32 rk_iova_dte_index(dma_addr_t iova)
255c68a2921SDaniel Kurtz {
256c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
257c68a2921SDaniel Kurtz }
258c68a2921SDaniel Kurtz 
259c68a2921SDaniel Kurtz static u32 rk_iova_pte_index(dma_addr_t iova)
260c68a2921SDaniel Kurtz {
261c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
262c68a2921SDaniel Kurtz }
263c68a2921SDaniel Kurtz 
264c68a2921SDaniel Kurtz static u32 rk_iova_page_offset(dma_addr_t iova)
265c68a2921SDaniel Kurtz {
266c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
267c68a2921SDaniel Kurtz }
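
/*
 * Worked example: the hypothetical iova 0x12345678 decomposes into
 * dte_index 0x048, pte_index 0x345 and page offset 0x678, and
 * (0x048 << 22) | (0x345 << 12) | 0x678 reassembles the original iova.
 */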
268c68a2921SDaniel Kurtz 
269cd6438c5SZhengShunQian static u32 rk_iommu_read(void __iomem *base, u32 offset)
270c68a2921SDaniel Kurtz {
271cd6438c5SZhengShunQian 	return readl(base + offset);
272c68a2921SDaniel Kurtz }
273c68a2921SDaniel Kurtz 
274cd6438c5SZhengShunQian static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
275c68a2921SDaniel Kurtz {
276cd6438c5SZhengShunQian 	writel(value, base + offset);
277c68a2921SDaniel Kurtz }
278c68a2921SDaniel Kurtz 
279c68a2921SDaniel Kurtz static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
280c68a2921SDaniel Kurtz {
281cd6438c5SZhengShunQian 	int i;
282cd6438c5SZhengShunQian 
283cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
284cd6438c5SZhengShunQian 		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
285c68a2921SDaniel Kurtz }
286c68a2921SDaniel Kurtz 
287cd6438c5SZhengShunQian static void rk_iommu_base_command(void __iomem *base, u32 command)
288cd6438c5SZhengShunQian {
289cd6438c5SZhengShunQian 	writel(command, base + RK_MMU_COMMAND);
290cd6438c5SZhengShunQian }

291bf2a5e71STomasz Figa static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
292c68a2921SDaniel Kurtz 			       size_t size)
293c68a2921SDaniel Kurtz {
294cd6438c5SZhengShunQian 	int i;
295bf2a5e71STomasz Figa 	dma_addr_t iova_end = iova_start + size;
296c68a2921SDaniel Kurtz 	/*
297c68a2921SDaniel Kurtz 	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
298c68a2921SDaniel Kurtz 	 * entire iotlb rather than iterate over individual iovas.
299c68a2921SDaniel Kurtz 	 */
300bf2a5e71STomasz Figa 	for (i = 0; i < iommu->num_mmu; i++) {
301bf2a5e71STomasz Figa 		dma_addr_t iova;
302bf2a5e71STomasz Figa 
303bf2a5e71STomasz Figa 		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
304cd6438c5SZhengShunQian 			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
305c68a2921SDaniel Kurtz 	}
306bf2a5e71STomasz Figa }
307c68a2921SDaniel Kurtz 
308c68a2921SDaniel Kurtz static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
309c68a2921SDaniel Kurtz {
310cd6438c5SZhengShunQian 	bool active = true;
311cd6438c5SZhengShunQian 	int i;
312cd6438c5SZhengShunQian 
313cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
314fbedd9b9SJohn Keeping 		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
315fbedd9b9SJohn Keeping 					   RK_MMU_STATUS_STALL_ACTIVE);
316cd6438c5SZhengShunQian 
317cd6438c5SZhengShunQian 	return active;
318c68a2921SDaniel Kurtz }
319c68a2921SDaniel Kurtz 
320c68a2921SDaniel Kurtz static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
321c68a2921SDaniel Kurtz {
322cd6438c5SZhengShunQian 	bool enable = true;
323cd6438c5SZhengShunQian 	int i;
324cd6438c5SZhengShunQian 
325cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
326fbedd9b9SJohn Keeping 		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
327fbedd9b9SJohn Keeping 					   RK_MMU_STATUS_PAGING_ENABLED);
328cd6438c5SZhengShunQian 
329cd6438c5SZhengShunQian 	return enable;
330c68a2921SDaniel Kurtz }
331c68a2921SDaniel Kurtz 
3320416bf64STomasz Figa static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
3330416bf64STomasz Figa {
3340416bf64STomasz Figa 	bool done = true;
3350416bf64STomasz Figa 	int i;
3360416bf64STomasz Figa 
3370416bf64STomasz Figa 	for (i = 0; i < iommu->num_mmu; i++)
3380416bf64STomasz Figa 		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
3390416bf64STomasz Figa 
3400416bf64STomasz Figa 	return done;
3410416bf64STomasz Figa }
3420416bf64STomasz Figa 
343c68a2921SDaniel Kurtz static int rk_iommu_enable_stall(struct rk_iommu *iommu)
344c68a2921SDaniel Kurtz {
345cd6438c5SZhengShunQian 	int ret, i;
3460416bf64STomasz Figa 	bool val;
347c68a2921SDaniel Kurtz 
348c68a2921SDaniel Kurtz 	if (rk_iommu_is_stall_active(iommu))
349c68a2921SDaniel Kurtz 		return 0;
350c68a2921SDaniel Kurtz 
351c68a2921SDaniel Kurtz 	/* Stall can only be enabled if paging is enabled */
352c68a2921SDaniel Kurtz 	if (!rk_iommu_is_paging_enabled(iommu))
353c68a2921SDaniel Kurtz 		return 0;
354c68a2921SDaniel Kurtz 
355c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
356c68a2921SDaniel Kurtz 
3570416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
3580416bf64STomasz Figa 				 val, RK_MMU_POLL_PERIOD_US,
3590416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
360c68a2921SDaniel Kurtz 	if (ret)
361cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
362c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
363cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
364c68a2921SDaniel Kurtz 
365c68a2921SDaniel Kurtz 	return ret;
366c68a2921SDaniel Kurtz }
367c68a2921SDaniel Kurtz 
368c68a2921SDaniel Kurtz static int rk_iommu_disable_stall(struct rk_iommu *iommu)
369c68a2921SDaniel Kurtz {
370cd6438c5SZhengShunQian 	int ret, i;
3710416bf64STomasz Figa 	bool val;
372c68a2921SDaniel Kurtz 
373c68a2921SDaniel Kurtz 	if (!rk_iommu_is_stall_active(iommu))
374c68a2921SDaniel Kurtz 		return 0;
375c68a2921SDaniel Kurtz 
376c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
377c68a2921SDaniel Kurtz 
3780416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
3790416bf64STomasz Figa 				 !val, RK_MMU_POLL_PERIOD_US,
3800416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
381c68a2921SDaniel Kurtz 	if (ret)
382cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
383c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
384cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
385c68a2921SDaniel Kurtz 
386c68a2921SDaniel Kurtz 	return ret;
387c68a2921SDaniel Kurtz }
388c68a2921SDaniel Kurtz 
389c68a2921SDaniel Kurtz static int rk_iommu_enable_paging(struct rk_iommu *iommu)
390c68a2921SDaniel Kurtz {
391cd6438c5SZhengShunQian 	int ret, i;
3920416bf64STomasz Figa 	bool val;
393c68a2921SDaniel Kurtz 
394c68a2921SDaniel Kurtz 	if (rk_iommu_is_paging_enabled(iommu))
395c68a2921SDaniel Kurtz 		return 0;
396c68a2921SDaniel Kurtz 
397c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
398c68a2921SDaniel Kurtz 
3990416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
4000416bf64STomasz Figa 				 val, RK_MMU_POLL_PERIOD_US,
4010416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
402c68a2921SDaniel Kurtz 	if (ret)
403cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
404c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
405cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
406c68a2921SDaniel Kurtz 
407c68a2921SDaniel Kurtz 	return ret;
408c68a2921SDaniel Kurtz }
409c68a2921SDaniel Kurtz 
410c68a2921SDaniel Kurtz static int rk_iommu_disable_paging(struct rk_iommu *iommu)
411c68a2921SDaniel Kurtz {
412cd6438c5SZhengShunQian 	int ret, i;
4130416bf64STomasz Figa 	bool val;
414c68a2921SDaniel Kurtz 
415c68a2921SDaniel Kurtz 	if (!rk_iommu_is_paging_enabled(iommu))
416c68a2921SDaniel Kurtz 		return 0;
417c68a2921SDaniel Kurtz 
418c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
419c68a2921SDaniel Kurtz 
4200416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
4210416bf64STomasz Figa 				 !val, RK_MMU_POLL_PERIOD_US,
4220416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
423c68a2921SDaniel Kurtz 	if (ret)
424cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
425c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
426cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
427c68a2921SDaniel Kurtz 
428c68a2921SDaniel Kurtz 	return ret;
429c68a2921SDaniel Kurtz }
430c68a2921SDaniel Kurtz 
431c68a2921SDaniel Kurtz static int rk_iommu_force_reset(struct rk_iommu *iommu)
432c68a2921SDaniel Kurtz {
433cd6438c5SZhengShunQian 	int ret, i;
434c68a2921SDaniel Kurtz 	u32 dte_addr;
4350416bf64STomasz Figa 	bool val;
436c68a2921SDaniel Kurtz 
437c3aa4742SSimon Xue 	if (iommu->reset_disabled)
438c3aa4742SSimon Xue 		return 0;
439c3aa4742SSimon Xue 
440c68a2921SDaniel Kurtz 	/*
441c68a2921SDaniel Kurtz 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
442c68a2921SDaniel Kurtz 	 * and verifying that the upper 5 nybbles are read back.
443c68a2921SDaniel Kurtz 	 */
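	/* E.g. the dummy value 0xCAFEBABE must read back as 0xCAFEB000. */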
444cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
445cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
446c68a2921SDaniel Kurtz 
447cd6438c5SZhengShunQian 		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
448c68a2921SDaniel Kurtz 		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
449c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
450c68a2921SDaniel Kurtz 			return -EFAULT;
451c68a2921SDaniel Kurtz 		}
452cd6438c5SZhengShunQian 	}
453c68a2921SDaniel Kurtz 
454c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
455c68a2921SDaniel Kurtz 
4560416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
4570416bf64STomasz Figa 				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
4580416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
459cd6438c5SZhengShunQian 	if (ret) {
460c68a2921SDaniel Kurtz 		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
461c68a2921SDaniel Kurtz 		return ret;
462c68a2921SDaniel Kurtz 	}
463c68a2921SDaniel Kurtz 
464cd6438c5SZhengShunQian 	return 0;
465cd6438c5SZhengShunQian }
466cd6438c5SZhengShunQian 
467cd6438c5SZhengShunQian static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
468c68a2921SDaniel Kurtz {
469cd6438c5SZhengShunQian 	void __iomem *base = iommu->bases[index];
470c68a2921SDaniel Kurtz 	u32 dte_index, pte_index, page_offset;
471c68a2921SDaniel Kurtz 	u32 mmu_dte_addr;
472c68a2921SDaniel Kurtz 	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
473c68a2921SDaniel Kurtz 	u32 *dte_addr;
474c68a2921SDaniel Kurtz 	u32 dte;
475c68a2921SDaniel Kurtz 	phys_addr_t pte_addr_phys = 0;
476c68a2921SDaniel Kurtz 	u32 *pte_addr = NULL;
477c68a2921SDaniel Kurtz 	u32 pte = 0;
478c68a2921SDaniel Kurtz 	phys_addr_t page_addr_phys = 0;
479c68a2921SDaniel Kurtz 	u32 page_flags = 0;
480c68a2921SDaniel Kurtz 
481c68a2921SDaniel Kurtz 	dte_index = rk_iova_dte_index(iova);
482c68a2921SDaniel Kurtz 	pte_index = rk_iova_pte_index(iova);
483c68a2921SDaniel Kurtz 	page_offset = rk_iova_page_offset(iova);
484c68a2921SDaniel Kurtz 
485cd6438c5SZhengShunQian 	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
486c68a2921SDaniel Kurtz 	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
487c68a2921SDaniel Kurtz 
488c68a2921SDaniel Kurtz 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
489c68a2921SDaniel Kurtz 	dte_addr = phys_to_virt(dte_addr_phys);
490c68a2921SDaniel Kurtz 	dte = *dte_addr;
491c68a2921SDaniel Kurtz 
492c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte))
493c68a2921SDaniel Kurtz 		goto print_it;
494c68a2921SDaniel Kurtz 
495c68a2921SDaniel Kurtz 	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
496c68a2921SDaniel Kurtz 	pte_addr = phys_to_virt(pte_addr_phys);
497c68a2921SDaniel Kurtz 	pte = *pte_addr;
498c68a2921SDaniel Kurtz 
499c68a2921SDaniel Kurtz 	if (!rk_pte_is_page_valid(pte))
500c68a2921SDaniel Kurtz 		goto print_it;
501c68a2921SDaniel Kurtz 
502c68a2921SDaniel Kurtz 	page_addr_phys = rk_pte_page_address(pte) + page_offset;
503c68a2921SDaniel Kurtz 	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
504c68a2921SDaniel Kurtz 
505c68a2921SDaniel Kurtz print_it:
506c68a2921SDaniel Kurtz 	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
507c68a2921SDaniel Kurtz 		&iova, dte_index, pte_index, page_offset);
508c68a2921SDaniel Kurtz 	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
509c68a2921SDaniel Kurtz 		&mmu_dte_addr_phys, &dte_addr_phys, dte,
510c68a2921SDaniel Kurtz 		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
511c68a2921SDaniel Kurtz 		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
512c68a2921SDaniel Kurtz }
513c68a2921SDaniel Kurtz 
514c68a2921SDaniel Kurtz static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
515c68a2921SDaniel Kurtz {
516c68a2921SDaniel Kurtz 	struct rk_iommu *iommu = dev_id;
517c68a2921SDaniel Kurtz 	u32 status;
518c68a2921SDaniel Kurtz 	u32 int_status;
519c68a2921SDaniel Kurtz 	dma_addr_t iova;
520cd6438c5SZhengShunQian 	irqreturn_t ret = IRQ_NONE;
521cd6438c5SZhengShunQian 	int i;
522c68a2921SDaniel Kurtz 
523f2e3a5f5STomasz Figa 	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
524f2e3a5f5STomasz Figa 
525cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
526cd6438c5SZhengShunQian 		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
527c68a2921SDaniel Kurtz 		if (int_status == 0)
528cd6438c5SZhengShunQian 			continue;
529c68a2921SDaniel Kurtz 
530cd6438c5SZhengShunQian 		ret = IRQ_HANDLED;
531cd6438c5SZhengShunQian 		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
532c68a2921SDaniel Kurtz 
533c68a2921SDaniel Kurtz 		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
534c68a2921SDaniel Kurtz 			int flags;
535c68a2921SDaniel Kurtz 
536cd6438c5SZhengShunQian 			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
537c68a2921SDaniel Kurtz 			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
538c68a2921SDaniel Kurtz 					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
539c68a2921SDaniel Kurtz 
540c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
541c68a2921SDaniel Kurtz 				&iova,
542c68a2921SDaniel Kurtz 				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
543c68a2921SDaniel Kurtz 
544cd6438c5SZhengShunQian 			log_iova(iommu, i, iova);
545c68a2921SDaniel Kurtz 
546c68a2921SDaniel Kurtz 			/*
547c68a2921SDaniel Kurtz 			 * Report page fault to any installed handlers.
548c68a2921SDaniel Kurtz 			 * Ignore the return code, though, since we always zap cache
549c68a2921SDaniel Kurtz 			 * and clear the page fault anyway.
550c68a2921SDaniel Kurtz 			 */
551c68a2921SDaniel Kurtz 			if (iommu->domain)
552c68a2921SDaniel Kurtz 				report_iommu_fault(iommu->domain, iommu->dev, iova,
553c68a2921SDaniel Kurtz 						   flags);
554c68a2921SDaniel Kurtz 			else
555c68a2921SDaniel Kurtz 				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
556c68a2921SDaniel Kurtz 
557cd6438c5SZhengShunQian 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
558cd6438c5SZhengShunQian 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
559c68a2921SDaniel Kurtz 		}
560c68a2921SDaniel Kurtz 
561c68a2921SDaniel Kurtz 		if (int_status & RK_MMU_IRQ_BUS_ERROR)
562c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
563c68a2921SDaniel Kurtz 
564c68a2921SDaniel Kurtz 		if (int_status & ~RK_MMU_IRQ_MASK)
565c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
566c68a2921SDaniel Kurtz 				int_status);
567c68a2921SDaniel Kurtz 
568cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
569cd6438c5SZhengShunQian 	}
570c68a2921SDaniel Kurtz 
571f2e3a5f5STomasz Figa 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
572f2e3a5f5STomasz Figa 
573cd6438c5SZhengShunQian 	return ret;
574c68a2921SDaniel Kurtz }
575c68a2921SDaniel Kurtz 
576c68a2921SDaniel Kurtz static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
577c68a2921SDaniel Kurtz 					 dma_addr_t iova)
578c68a2921SDaniel Kurtz {
579bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
580c68a2921SDaniel Kurtz 	unsigned long flags;
581c68a2921SDaniel Kurtz 	phys_addr_t pt_phys, phys = 0;
582c68a2921SDaniel Kurtz 	u32 dte, pte;
583c68a2921SDaniel Kurtz 	u32 *page_table;
584c68a2921SDaniel Kurtz 
585c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
586c68a2921SDaniel Kurtz 
587c68a2921SDaniel Kurtz 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
588c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte))
589c68a2921SDaniel Kurtz 		goto out;
590c68a2921SDaniel Kurtz 
591c68a2921SDaniel Kurtz 	pt_phys = rk_dte_pt_address(dte);
592c68a2921SDaniel Kurtz 	page_table = (u32 *)phys_to_virt(pt_phys);
593c68a2921SDaniel Kurtz 	pte = page_table[rk_iova_pte_index(iova)];
594c68a2921SDaniel Kurtz 	if (!rk_pte_is_page_valid(pte))
595c68a2921SDaniel Kurtz 		goto out;
596c68a2921SDaniel Kurtz 
597c68a2921SDaniel Kurtz 	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
598c68a2921SDaniel Kurtz out:
599c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
600c68a2921SDaniel Kurtz 
601c68a2921SDaniel Kurtz 	return phys;
602c68a2921SDaniel Kurtz }
603c68a2921SDaniel Kurtz 
604c68a2921SDaniel Kurtz static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
605c68a2921SDaniel Kurtz 			      dma_addr_t iova, size_t size)
606c68a2921SDaniel Kurtz {
607c68a2921SDaniel Kurtz 	struct list_head *pos;
608c68a2921SDaniel Kurtz 	unsigned long flags;
609c68a2921SDaniel Kurtz 
610c68a2921SDaniel Kurtz 	/* shootdown these iova from all iommus using this domain */
611c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
612c68a2921SDaniel Kurtz 	list_for_each(pos, &rk_domain->iommus) {
613c68a2921SDaniel Kurtz 		struct rk_iommu *iommu;
614c68a2921SDaniel Kurtz 		iommu = list_entry(pos, struct rk_iommu, node);
615f2e3a5f5STomasz Figa 		WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
616c68a2921SDaniel Kurtz 		rk_iommu_zap_lines(iommu, iova, size);
617f2e3a5f5STomasz Figa 		clk_bulk_disable(iommu->num_clocks, iommu->clocks);
618c68a2921SDaniel Kurtz 	}
619c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
620c68a2921SDaniel Kurtz }
621c68a2921SDaniel Kurtz 
622d4dd920cSTomasz Figa static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
623d4dd920cSTomasz Figa 					 dma_addr_t iova, size_t size)
624d4dd920cSTomasz Figa {
625d4dd920cSTomasz Figa 	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
626d4dd920cSTomasz Figa 	if (size > SPAGE_SIZE)
627d4dd920cSTomasz Figa 		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
628d4dd920cSTomasz Figa 					SPAGE_SIZE);
629d4dd920cSTomasz Figa }
630d4dd920cSTomasz Figa 
631c68a2921SDaniel Kurtz static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
632c68a2921SDaniel Kurtz 				  dma_addr_t iova)
633c68a2921SDaniel Kurtz {
634c68a2921SDaniel Kurtz 	u32 *page_table, *dte_addr;
6354f0aba67SShunqian Zheng 	u32 dte_index, dte;
636c68a2921SDaniel Kurtz 	phys_addr_t pt_phys;
6374f0aba67SShunqian Zheng 	dma_addr_t pt_dma;
638c68a2921SDaniel Kurtz 
639c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
640c68a2921SDaniel Kurtz 
6414f0aba67SShunqian Zheng 	dte_index = rk_iova_dte_index(iova);
6424f0aba67SShunqian Zheng 	dte_addr = &rk_domain->dt[dte_index];
643c68a2921SDaniel Kurtz 	dte = *dte_addr;
644c68a2921SDaniel Kurtz 	if (rk_dte_is_pt_valid(dte))
645c68a2921SDaniel Kurtz 		goto done;
646c68a2921SDaniel Kurtz 
647c68a2921SDaniel Kurtz 	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
648c68a2921SDaniel Kurtz 	if (!page_table)
649c68a2921SDaniel Kurtz 		return ERR_PTR(-ENOMEM);
650c68a2921SDaniel Kurtz 
6519176a303SJeffy Chen 	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
6529176a303SJeffy Chen 	if (dma_mapping_error(dma_dev, pt_dma)) {
6539176a303SJeffy Chen 		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
6544f0aba67SShunqian Zheng 		free_page((unsigned long)page_table);
6554f0aba67SShunqian Zheng 		return ERR_PTR(-ENOMEM);
6564f0aba67SShunqian Zheng 	}
6574f0aba67SShunqian Zheng 
6584f0aba67SShunqian Zheng 	dte = rk_mk_dte(pt_dma);
659c68a2921SDaniel Kurtz 	*dte_addr = dte;
660c68a2921SDaniel Kurtz 
6614f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
6624f0aba67SShunqian Zheng 	rk_table_flush(rk_domain,
6634f0aba67SShunqian Zheng 		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
664c68a2921SDaniel Kurtz done:
665c68a2921SDaniel Kurtz 	pt_phys = rk_dte_pt_address(dte);
666c68a2921SDaniel Kurtz 	return (u32 *)phys_to_virt(pt_phys);
667c68a2921SDaniel Kurtz }
668c68a2921SDaniel Kurtz 
669c68a2921SDaniel Kurtz static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
6704f0aba67SShunqian Zheng 				  u32 *pte_addr, dma_addr_t pte_dma,
6714f0aba67SShunqian Zheng 				  size_t size)
672c68a2921SDaniel Kurtz {
673c68a2921SDaniel Kurtz 	unsigned int pte_count;
674c68a2921SDaniel Kurtz 	unsigned int pte_total = size / SPAGE_SIZE;
675c68a2921SDaniel Kurtz 
676c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
677c68a2921SDaniel Kurtz 
678c68a2921SDaniel Kurtz 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
679c68a2921SDaniel Kurtz 		u32 pte = pte_addr[pte_count];
680c68a2921SDaniel Kurtz 		if (!rk_pte_is_page_valid(pte))
681c68a2921SDaniel Kurtz 			break;
682c68a2921SDaniel Kurtz 
683c68a2921SDaniel Kurtz 		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
684c68a2921SDaniel Kurtz 	}
685c68a2921SDaniel Kurtz 
6864f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pte_dma, pte_count);
687c68a2921SDaniel Kurtz 
688c68a2921SDaniel Kurtz 	return pte_count * SPAGE_SIZE;
689c68a2921SDaniel Kurtz }
690c68a2921SDaniel Kurtz 
691c68a2921SDaniel Kurtz static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
6924f0aba67SShunqian Zheng 			     dma_addr_t pte_dma, dma_addr_t iova,
6934f0aba67SShunqian Zheng 			     phys_addr_t paddr, size_t size, int prot)
694c68a2921SDaniel Kurtz {
695c68a2921SDaniel Kurtz 	unsigned int pte_count;
696c68a2921SDaniel Kurtz 	unsigned int pte_total = size / SPAGE_SIZE;
697c68a2921SDaniel Kurtz 	phys_addr_t page_phys;
698c68a2921SDaniel Kurtz 
699c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
700c68a2921SDaniel Kurtz 
701c68a2921SDaniel Kurtz 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
702c68a2921SDaniel Kurtz 		u32 pte = pte_addr[pte_count];
703c68a2921SDaniel Kurtz 
704c68a2921SDaniel Kurtz 		if (rk_pte_is_page_valid(pte))
705c68a2921SDaniel Kurtz 			goto unwind;
706c68a2921SDaniel Kurtz 
707c68a2921SDaniel Kurtz 		pte_addr[pte_count] = rk_mk_pte(paddr, prot);
708c68a2921SDaniel Kurtz 
709c68a2921SDaniel Kurtz 		paddr += SPAGE_SIZE;
710c68a2921SDaniel Kurtz 	}
711c68a2921SDaniel Kurtz 
7124f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pte_dma, pte_total);
713c68a2921SDaniel Kurtz 
714d4dd920cSTomasz Figa 	/*
715d4dd920cSTomasz Figa 	 * Zap the first and last iova to evict from iotlb any previously
716d4dd920cSTomasz Figa 	 * mapped cachelines holding stale values for its dte and pte.
717d4dd920cSTomasz Figa 	 * We only zap the first and last iova, since only they could have
718d4dd920cSTomasz Figa 	 * dte or pte shared with an existing mapping.
719d4dd920cSTomasz Figa 	 */
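	/*
	 * For example, a 16 KiB map at iova 0x00401000 zaps only iovas
	 * 0x00401000 and 0x00404000; the middle pages of a new mapping can
	 * never share a dte or pte with a pre-existing one.
	 */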
720d4dd920cSTomasz Figa 	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
721d4dd920cSTomasz Figa 
722c68a2921SDaniel Kurtz 	return 0;
723c68a2921SDaniel Kurtz unwind:
724c68a2921SDaniel Kurtz 	/* Unmap the range of iovas that we just mapped */
7254f0aba67SShunqian Zheng 	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
7264f0aba67SShunqian Zheng 			    pte_count * SPAGE_SIZE);
727c68a2921SDaniel Kurtz 
728c68a2921SDaniel Kurtz 	iova += pte_count * SPAGE_SIZE;
729c68a2921SDaniel Kurtz 	page_phys = rk_pte_page_address(pte_addr[pte_count]);
730c68a2921SDaniel Kurtz 	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
731c68a2921SDaniel Kurtz 	       &iova, &page_phys, &paddr, prot);
732c68a2921SDaniel Kurtz 
733c68a2921SDaniel Kurtz 	return -EADDRINUSE;
734c68a2921SDaniel Kurtz }
735c68a2921SDaniel Kurtz 
736c68a2921SDaniel Kurtz static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
737c68a2921SDaniel Kurtz 			phys_addr_t paddr, size_t size, int prot)
738c68a2921SDaniel Kurtz {
739bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
740c68a2921SDaniel Kurtz 	unsigned long flags;
7414f0aba67SShunqian Zheng 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
742c68a2921SDaniel Kurtz 	u32 *page_table, *pte_addr;
7434f0aba67SShunqian Zheng 	u32 dte, pte_index;
744c68a2921SDaniel Kurtz 	int ret;
745c68a2921SDaniel Kurtz 
746c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
747c68a2921SDaniel Kurtz 
748c68a2921SDaniel Kurtz 	/*
749c68a2921SDaniel Kurtz 	 * pgsize_bitmap specifies iova sizes that fit in one page table
750c68a2921SDaniel Kurtz 	 * (1024 4-KiB pages = 4 MiB).
751c68a2921SDaniel Kurtz 	 * So, size will always be 4096 <= size <= 4194304.
752c68a2921SDaniel Kurtz 	 * Since iommu_map() guarantees that both iova and size will be
753c68a2921SDaniel Kurtz 	 * aligned, we will always only be mapping from a single dte here.
754c68a2921SDaniel Kurtz 	 */
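	/*
	 * E.g. a hypothetical 16 KiB map at iova 0x00401000 fills PTEs 1..4
	 * of the single page table behind dte_index 1.
	 */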
755c68a2921SDaniel Kurtz 	page_table = rk_dte_get_page_table(rk_domain, iova);
756c68a2921SDaniel Kurtz 	if (IS_ERR(page_table)) {
757c68a2921SDaniel Kurtz 		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
758c68a2921SDaniel Kurtz 		return PTR_ERR(page_table);
759c68a2921SDaniel Kurtz 	}
760c68a2921SDaniel Kurtz 
7614f0aba67SShunqian Zheng 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
7624f0aba67SShunqian Zheng 	pte_index = rk_iova_pte_index(iova);
7634f0aba67SShunqian Zheng 	pte_addr = &page_table[pte_index];
7644f0aba67SShunqian Zheng 	pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
7654f0aba67SShunqian Zheng 	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
7664f0aba67SShunqian Zheng 				paddr, size, prot);
7674f0aba67SShunqian Zheng 
768c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
769c68a2921SDaniel Kurtz 
770c68a2921SDaniel Kurtz 	return ret;
771c68a2921SDaniel Kurtz }
772c68a2921SDaniel Kurtz 
773c68a2921SDaniel Kurtz static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
774c68a2921SDaniel Kurtz 			     size_t size)
775c68a2921SDaniel Kurtz {
776bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
777c68a2921SDaniel Kurtz 	unsigned long flags;
7784f0aba67SShunqian Zheng 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
779c68a2921SDaniel Kurtz 	phys_addr_t pt_phys;
780c68a2921SDaniel Kurtz 	u32 dte;
781c68a2921SDaniel Kurtz 	u32 *pte_addr;
782c68a2921SDaniel Kurtz 	size_t unmap_size;
783c68a2921SDaniel Kurtz 
784c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
785c68a2921SDaniel Kurtz 
786c68a2921SDaniel Kurtz 	/*
787c68a2921SDaniel Kurtz 	 * pgsize_bitmap specifies iova sizes that fit in one page table
788c68a2921SDaniel Kurtz 	 * (1024 4-KiB pages = 4 MiB).
789c68a2921SDaniel Kurtz 	 * So, size will always be 4096 <= size <= 4194304.
790c68a2921SDaniel Kurtz 	 * Since iommu_unmap() guarantees that both iova and size will be
791c68a2921SDaniel Kurtz 	 * aligned, we will always only be unmapping from a single dte here.
792c68a2921SDaniel Kurtz 	 */
793c68a2921SDaniel Kurtz 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
794c68a2921SDaniel Kurtz 	/* Just return 0 if iova is unmapped */
795c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte)) {
796c68a2921SDaniel Kurtz 		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
797c68a2921SDaniel Kurtz 		return 0;
798c68a2921SDaniel Kurtz 	}
799c68a2921SDaniel Kurtz 
800c68a2921SDaniel Kurtz 	pt_phys = rk_dte_pt_address(dte);
801c68a2921SDaniel Kurtz 	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
8024f0aba67SShunqian Zheng 	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
8034f0aba67SShunqian Zheng 	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
804c68a2921SDaniel Kurtz 
805c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
806c68a2921SDaniel Kurtz 
807c68a2921SDaniel Kurtz 	/* Shootdown iotlb entries for iova range that was just unmapped */
808c68a2921SDaniel Kurtz 	rk_iommu_zap_iova(rk_domain, iova, unmap_size);
809c68a2921SDaniel Kurtz 
810c68a2921SDaniel Kurtz 	return unmap_size;
811c68a2921SDaniel Kurtz }
812c68a2921SDaniel Kurtz 
813c68a2921SDaniel Kurtz static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
814c68a2921SDaniel Kurtz {
8155fd577c3SJeffy Chen 	struct rk_iommudata *data = dev->archdata.iommu;
816c68a2921SDaniel Kurtz 
8175fd577c3SJeffy Chen 	return data ? data->iommu : NULL;
818c68a2921SDaniel Kurtz }
819c68a2921SDaniel Kurtz 
820c68a2921SDaniel Kurtz static int rk_iommu_attach_device(struct iommu_domain *domain,
821c68a2921SDaniel Kurtz 				  struct device *dev)
822c68a2921SDaniel Kurtz {
823c68a2921SDaniel Kurtz 	struct rk_iommu *iommu;
824bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
825c68a2921SDaniel Kurtz 	unsigned long flags;
826cd6438c5SZhengShunQian 	int ret, i;
827c68a2921SDaniel Kurtz 
828c68a2921SDaniel Kurtz 	/*
829c68a2921SDaniel Kurtz 	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
830c68a2921SDaniel Kurtz 	 * Such a device does not belong to an iommu group.
831c68a2921SDaniel Kurtz 	 */
832c68a2921SDaniel Kurtz 	iommu = rk_iommu_from_dev(dev);
833c68a2921SDaniel Kurtz 	if (!iommu)
834c68a2921SDaniel Kurtz 		return 0;
835c68a2921SDaniel Kurtz 
836f2e3a5f5STomasz Figa 	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
837c68a2921SDaniel Kurtz 	if (ret)
838c68a2921SDaniel Kurtz 		return ret;
839c68a2921SDaniel Kurtz 
840f2e3a5f5STomasz Figa 	ret = rk_iommu_enable_stall(iommu);
841f2e3a5f5STomasz Figa 	if (ret)
842f2e3a5f5STomasz Figa 		goto out_disable_clocks;
843f2e3a5f5STomasz Figa 
844c68a2921SDaniel Kurtz 	ret = rk_iommu_force_reset(iommu);
845c68a2921SDaniel Kurtz 	if (ret)
846f6717d72STomasz Figa 		goto out_disable_stall;
847c68a2921SDaniel Kurtz 
848c68a2921SDaniel Kurtz 	iommu->domain = domain;
849c68a2921SDaniel Kurtz 
850cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
8514f0aba67SShunqian Zheng 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
8524f0aba67SShunqian Zheng 			       rk_domain->dt_dma);
853ae8a7910SJohn Keeping 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
854cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
855cd6438c5SZhengShunQian 	}
856c68a2921SDaniel Kurtz 
857c68a2921SDaniel Kurtz 	ret = rk_iommu_enable_paging(iommu);
858c68a2921SDaniel Kurtz 	if (ret)
859f6717d72STomasz Figa 		goto out_disable_stall;
860c68a2921SDaniel Kurtz 
861c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
862c68a2921SDaniel Kurtz 	list_add_tail(&iommu->node, &rk_domain->iommus);
863c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
864c68a2921SDaniel Kurtz 
865ec4292deSHeiko Stuebner 	dev_dbg(dev, "Attached to iommu domain\n");
866c68a2921SDaniel Kurtz 
867f6717d72STomasz Figa out_disable_stall:
868c68a2921SDaniel Kurtz 	rk_iommu_disable_stall(iommu);
869f2e3a5f5STomasz Figa out_disable_clocks:
870f2e3a5f5STomasz Figa 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
871f6717d72STomasz Figa 	return ret;
872c68a2921SDaniel Kurtz }
873c68a2921SDaniel Kurtz 
874c68a2921SDaniel Kurtz static void rk_iommu_detach_device(struct iommu_domain *domain,
875c68a2921SDaniel Kurtz 				   struct device *dev)
876c68a2921SDaniel Kurtz {
877c68a2921SDaniel Kurtz 	struct rk_iommu *iommu;
878bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
879c68a2921SDaniel Kurtz 	unsigned long flags;
880cd6438c5SZhengShunQian 	int i;
881c68a2921SDaniel Kurtz 
882c68a2921SDaniel Kurtz 	/* Allow 'virtual devices' (eg drm) to detach from domain */
882c68a2921SDaniel Kurtz 	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
884c68a2921SDaniel Kurtz 	if (!iommu)
885c68a2921SDaniel Kurtz 		return;
886c68a2921SDaniel Kurtz 
887c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
888c68a2921SDaniel Kurtz 	list_del_init(&iommu->node);
889c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
890c68a2921SDaniel Kurtz 
891c68a2921SDaniel Kurtz 	/* Ignore error while disabling, just keep going */
892f2e3a5f5STomasz Figa 	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
893c68a2921SDaniel Kurtz 	rk_iommu_enable_stall(iommu);
894c68a2921SDaniel Kurtz 	rk_iommu_disable_paging(iommu);
895cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
896cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
897cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
898cd6438c5SZhengShunQian 	}
899c68a2921SDaniel Kurtz 	rk_iommu_disable_stall(iommu);
900f2e3a5f5STomasz Figa 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
901c68a2921SDaniel Kurtz 
902c68a2921SDaniel Kurtz 	iommu->domain = NULL;
903c68a2921SDaniel Kurtz 
904ec4292deSHeiko Stuebner 	dev_dbg(dev, "Detached from iommu domain\n");
905c68a2921SDaniel Kurtz }
906c68a2921SDaniel Kurtz 
907bcd516a3SJoerg Roedel static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
908c68a2921SDaniel Kurtz {
909c68a2921SDaniel Kurtz 	struct rk_iommu_domain *rk_domain;
910c68a2921SDaniel Kurtz 
911a93db2f2SShunqian Zheng 	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
912bcd516a3SJoerg Roedel 		return NULL;
913bcd516a3SJoerg Roedel 
9149176a303SJeffy Chen 	if (!dma_dev)
915bcd516a3SJoerg Roedel 		return NULL;
916c68a2921SDaniel Kurtz 
9179176a303SJeffy Chen 	rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
9184f0aba67SShunqian Zheng 	if (!rk_domain)
9199176a303SJeffy Chen 		return NULL;
9204f0aba67SShunqian Zheng 
921a93db2f2SShunqian Zheng 	if (type == IOMMU_DOMAIN_DMA &&
922a93db2f2SShunqian Zheng 	    iommu_get_dma_cookie(&rk_domain->domain))
9239176a303SJeffy Chen 		return NULL;
9244f0aba67SShunqian Zheng 
925c68a2921SDaniel Kurtz 	/*
926c68a2921SDaniel Kurtz 	 * rk32xx iommus use a 2-level pagetable.
927c68a2921SDaniel Kurtz 	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
928c68a2921SDaniel Kurtz 	 * Allocate one 4 KiB page for each table.
929c68a2921SDaniel Kurtz 	 */
930c68a2921SDaniel Kurtz 	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
931c68a2921SDaniel Kurtz 	if (!rk_domain->dt)
9324f0aba67SShunqian Zheng 		goto err_put_cookie;
933c68a2921SDaniel Kurtz 
9349176a303SJeffy Chen 	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
9354f0aba67SShunqian Zheng 					   SPAGE_SIZE, DMA_TO_DEVICE);
9369176a303SJeffy Chen 	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
9379176a303SJeffy Chen 		dev_err(dma_dev, "DMA map error for DT\n");
9384f0aba67SShunqian Zheng 		goto err_free_dt;
9394f0aba67SShunqian Zheng 	}
9404f0aba67SShunqian Zheng 
9414f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
942c68a2921SDaniel Kurtz 
943c68a2921SDaniel Kurtz 	spin_lock_init(&rk_domain->iommus_lock);
944c68a2921SDaniel Kurtz 	spin_lock_init(&rk_domain->dt_lock);
945c68a2921SDaniel Kurtz 	INIT_LIST_HEAD(&rk_domain->iommus);
946c68a2921SDaniel Kurtz 
947a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.aperture_start = 0;
948a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
949a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.force_aperture = true;
950a93db2f2SShunqian Zheng 
951bcd516a3SJoerg Roedel 	return &rk_domain->domain;
952c68a2921SDaniel Kurtz 
9534f0aba67SShunqian Zheng err_free_dt:
9544f0aba67SShunqian Zheng 	free_page((unsigned long)rk_domain->dt);
9554f0aba67SShunqian Zheng err_put_cookie:
956a93db2f2SShunqian Zheng 	if (type == IOMMU_DOMAIN_DMA)
9574f0aba67SShunqian Zheng 		iommu_put_dma_cookie(&rk_domain->domain);
9584f0aba67SShunqian Zheng 
959bcd516a3SJoerg Roedel 	return NULL;
960c68a2921SDaniel Kurtz }
961c68a2921SDaniel Kurtz 
962bcd516a3SJoerg Roedel static void rk_iommu_domain_free(struct iommu_domain *domain)
963c68a2921SDaniel Kurtz {
964bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
965c68a2921SDaniel Kurtz 	int i;
966c68a2921SDaniel Kurtz 
967c68a2921SDaniel Kurtz 	WARN_ON(!list_empty(&rk_domain->iommus));
968c68a2921SDaniel Kurtz 
969c68a2921SDaniel Kurtz 	for (i = 0; i < NUM_DT_ENTRIES; i++) {
970c68a2921SDaniel Kurtz 		u32 dte = rk_domain->dt[i];
971c68a2921SDaniel Kurtz 		if (rk_dte_is_pt_valid(dte)) {
972c68a2921SDaniel Kurtz 			phys_addr_t pt_phys = rk_dte_pt_address(dte);
973c68a2921SDaniel Kurtz 			u32 *page_table = phys_to_virt(pt_phys);
9749176a303SJeffy Chen 			dma_unmap_single(dma_dev, pt_phys,
9754f0aba67SShunqian Zheng 					 SPAGE_SIZE, DMA_TO_DEVICE);
976c68a2921SDaniel Kurtz 			free_page((unsigned long)page_table);
977c68a2921SDaniel Kurtz 		}
978c68a2921SDaniel Kurtz 	}
979c68a2921SDaniel Kurtz 
9809176a303SJeffy Chen 	dma_unmap_single(dma_dev, rk_domain->dt_dma,
9814f0aba67SShunqian Zheng 			 SPAGE_SIZE, DMA_TO_DEVICE);
982c68a2921SDaniel Kurtz 	free_page((unsigned long)rk_domain->dt);
9834f0aba67SShunqian Zheng 
984a93db2f2SShunqian Zheng 	if (domain->type == IOMMU_DOMAIN_DMA)
9854f0aba67SShunqian Zheng 		iommu_put_dma_cookie(&rk_domain->domain);
986c68a2921SDaniel Kurtz }
987c68a2921SDaniel Kurtz 
988c68a2921SDaniel Kurtz static int rk_iommu_add_device(struct device *dev)
989c68a2921SDaniel Kurtz {
990c68a2921SDaniel Kurtz 	struct iommu_group *group;
991c9d9f239SJoerg Roedel 	struct rk_iommu *iommu;
992c68a2921SDaniel Kurtz 
993c9d9f239SJoerg Roedel 	iommu = rk_iommu_from_dev(dev);
9945fd577c3SJeffy Chen 	if (!iommu)
9955fd577c3SJeffy Chen 		return -ENODEV;
9965fd577c3SJeffy Chen 
9975fd577c3SJeffy Chen 	group = iommu_group_get_for_dev(dev);
9985fd577c3SJeffy Chen 	if (IS_ERR(group))
9995fd577c3SJeffy Chen 		return PTR_ERR(group);
10005fd577c3SJeffy Chen 	iommu_group_put(group);
10015fd577c3SJeffy Chen 
1002c9d9f239SJoerg Roedel 	iommu_device_link(&iommu->iommu, dev);
1003c9d9f239SJoerg Roedel 
1004c68a2921SDaniel Kurtz 	return 0;
1005c68a2921SDaniel Kurtz }
1006c68a2921SDaniel Kurtz 
1007c68a2921SDaniel Kurtz static void rk_iommu_remove_device(struct device *dev)
1008c68a2921SDaniel Kurtz {
1009c9d9f239SJoerg Roedel 	struct rk_iommu *iommu;
1010c9d9f239SJoerg Roedel 
1011c9d9f239SJoerg Roedel 	iommu = rk_iommu_from_dev(dev);
1012c9d9f239SJoerg Roedel 
10135fd577c3SJeffy Chen 	iommu_device_unlink(&iommu->iommu, dev);
1014c68a2921SDaniel Kurtz 	iommu_group_remove_device(dev);
1015c68a2921SDaniel Kurtz }
1016c68a2921SDaniel Kurtz 
10175fd577c3SJeffy Chen static int rk_iommu_of_xlate(struct device *dev,
10185fd577c3SJeffy Chen 			     struct of_phandle_args *args)
10195fd577c3SJeffy Chen {
10205fd577c3SJeffy Chen 	struct platform_device *iommu_dev;
10215fd577c3SJeffy Chen 	struct rk_iommudata *data;
10225fd577c3SJeffy Chen 
10235fd577c3SJeffy Chen 	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
10245fd577c3SJeffy Chen 	if (!data)
10255fd577c3SJeffy Chen 		return -ENOMEM;
10265fd577c3SJeffy Chen 
10275fd577c3SJeffy Chen 	iommu_dev = of_find_device_by_node(args->np);
10285fd577c3SJeffy Chen 
10295fd577c3SJeffy Chen 	data->iommu = platform_get_drvdata(iommu_dev);
10305fd577c3SJeffy Chen 	dev->archdata.iommu = data;
10315fd577c3SJeffy Chen 
10325fd577c3SJeffy Chen 	of_dev_put(iommu_dev);
10335fd577c3SJeffy Chen 
10345fd577c3SJeffy Chen 	return 0;
10355fd577c3SJeffy Chen }
10365fd577c3SJeffy Chen 
1037c68a2921SDaniel Kurtz static const struct iommu_ops rk_iommu_ops = {
1038bcd516a3SJoerg Roedel 	.domain_alloc = rk_iommu_domain_alloc,
1039bcd516a3SJoerg Roedel 	.domain_free = rk_iommu_domain_free,
1040c68a2921SDaniel Kurtz 	.attach_dev = rk_iommu_attach_device,
1041c68a2921SDaniel Kurtz 	.detach_dev = rk_iommu_detach_device,
1042c68a2921SDaniel Kurtz 	.map = rk_iommu_map,
1043c68a2921SDaniel Kurtz 	.unmap = rk_iommu_unmap,
1044e6d0f473SSimon Xue 	.map_sg = default_iommu_map_sg,
1045c68a2921SDaniel Kurtz 	.add_device = rk_iommu_add_device,
1046c68a2921SDaniel Kurtz 	.remove_device = rk_iommu_remove_device,
1047c68a2921SDaniel Kurtz 	.iova_to_phys = rk_iommu_iova_to_phys,
10485fd577c3SJeffy Chen 	.device_group = generic_device_group,
1049c68a2921SDaniel Kurtz 	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
10505fd577c3SJeffy Chen 	.of_xlate = rk_iommu_of_xlate,
1051c68a2921SDaniel Kurtz };
1052c68a2921SDaniel Kurtz 
1053c68a2921SDaniel Kurtz static int rk_iommu_probe(struct platform_device *pdev)
1054c68a2921SDaniel Kurtz {
1055c68a2921SDaniel Kurtz 	struct device *dev = &pdev->dev;
1056c68a2921SDaniel Kurtz 	struct rk_iommu *iommu;
1057c68a2921SDaniel Kurtz 	struct resource *res;
10583d08f434SShunqian Zheng 	int num_res = pdev->num_resources;
1059d0b912bdSJeffy Chen 	int err, i, irq;
1060c68a2921SDaniel Kurtz 
1061c68a2921SDaniel Kurtz 	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
1062c68a2921SDaniel Kurtz 	if (!iommu)
1063c68a2921SDaniel Kurtz 		return -ENOMEM;
1064c68a2921SDaniel Kurtz 
1065c68a2921SDaniel Kurtz 	platform_set_drvdata(pdev, iommu);
1066c68a2921SDaniel Kurtz 	iommu->dev = dev;
1067cd6438c5SZhengShunQian 	iommu->num_mmu = 0;
10683d08f434SShunqian Zheng 
10693d08f434SShunqian Zheng 	iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
1070cd6438c5SZhengShunQian 				    GFP_KERNEL);
1071cd6438c5SZhengShunQian 	if (!iommu->bases)
1072cd6438c5SZhengShunQian 		return -ENOMEM;
1073c68a2921SDaniel Kurtz 
10743d08f434SShunqian Zheng 	for (i = 0; i < num_res; i++) {
1075cd6438c5SZhengShunQian 		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
10768d7f2d84STomeu Vizoso 		if (!res)
10778d7f2d84STomeu Vizoso 			continue;
1078cd6438c5SZhengShunQian 		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
1079cd6438c5SZhengShunQian 		if (IS_ERR(iommu->bases[i]))
1080cd6438c5SZhengShunQian 			continue;
1081cd6438c5SZhengShunQian 		iommu->num_mmu++;
1082cd6438c5SZhengShunQian 	}
1083cd6438c5SZhengShunQian 	if (iommu->num_mmu == 0)
1084cd6438c5SZhengShunQian 		return PTR_ERR(iommu->bases[0]);
1085c68a2921SDaniel Kurtz 
1086d0b912bdSJeffy Chen 	i = 0;
1087d0b912bdSJeffy Chen 	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
1088d0b912bdSJeffy Chen 		if (irq < 0)
1089d0b912bdSJeffy Chen 			return irq;
109003f732f8SSimon Xue 
1091d0b912bdSJeffy Chen 		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
1092d0b912bdSJeffy Chen 				       IRQF_SHARED, dev_name(dev), iommu);
1093d0b912bdSJeffy Chen 		if (err)
1094d0b912bdSJeffy Chen 			return err;
1095c68a2921SDaniel Kurtz 	}
1096c68a2921SDaniel Kurtz 
1097c3aa4742SSimon Xue 	iommu->reset_disabled = device_property_read_bool(dev,
1098c3aa4742SSimon Xue 					"rockchip,disable-mmu-reset");
1099c68a2921SDaniel Kurtz 
1100f2e3a5f5STomasz Figa 	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
1101f2e3a5f5STomasz Figa 	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
1102f2e3a5f5STomasz Figa 				     sizeof(*iommu->clocks), GFP_KERNEL);
1103f2e3a5f5STomasz Figa 	if (!iommu->clocks)
1104f2e3a5f5STomasz Figa 		return -ENOMEM;
1105f2e3a5f5STomasz Figa 
1106f2e3a5f5STomasz Figa 	for (i = 0; i < iommu->num_clocks; ++i)
1107f2e3a5f5STomasz Figa 		iommu->clocks[i].id = rk_iommu_clocks[i];
1108f2e3a5f5STomasz Figa 
1109f2e3a5f5STomasz Figa 	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
1110c9d9f239SJoerg Roedel 	if (err)
1111c9d9f239SJoerg Roedel 		return err;
1112c9d9f239SJoerg Roedel 
1113f2e3a5f5STomasz Figa 	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
1114f2e3a5f5STomasz Figa 	if (err)
1115f2e3a5f5STomasz Figa 		return err;
1116f2e3a5f5STomasz Figa 
1117f2e3a5f5STomasz Figa 	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
1118f2e3a5f5STomasz Figa 	if (err)
1119f2e3a5f5STomasz Figa 		goto err_unprepare_clocks;
1120f2e3a5f5STomasz Figa 
1121c9d9f239SJoerg Roedel 	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
11225fd577c3SJeffy Chen 	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
11235fd577c3SJeffy Chen 
1124c9d9f239SJoerg Roedel 	err = iommu_device_register(&iommu->iommu);
11256d9ffaadSJeffy Chen 	if (err)
1126f2e3a5f5STomasz Figa 		goto err_remove_sysfs;
1127c9d9f239SJoerg Roedel 
11289176a303SJeffy Chen 	/*
11299176a303SJeffy Chen 	 * Use the first registered IOMMU device for domain to use with DMA
11309176a303SJeffy Chen 	 * API, since a domain might not physically correspond to a single
11319176a303SJeffy Chen 	 * IOMMU device.
11329176a303SJeffy Chen 	 */
11339176a303SJeffy Chen 	if (!dma_dev)
11349176a303SJeffy Chen 		dma_dev = &pdev->dev;
11359176a303SJeffy Chen 
1136*4d88a8a4SJeffy Chen 	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
1137*4d88a8a4SJeffy Chen 
1138f2e3a5f5STomasz Figa 	return 0;
1139f2e3a5f5STomasz Figa err_remove_sysfs:
1140f2e3a5f5STomasz Figa 	iommu_device_sysfs_remove(&iommu->iommu);
1141f2e3a5f5STomasz Figa err_unprepare_clocks:
1142f2e3a5f5STomasz Figa 	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
1143c9d9f239SJoerg Roedel 	return err;
1144c68a2921SDaniel Kurtz }
1145c68a2921SDaniel Kurtz 
11461a4e90f2SMarc Zyngier static void rk_iommu_shutdown(struct platform_device *pdev)
11471a4e90f2SMarc Zyngier {
11481a4e90f2SMarc Zyngier 	struct rk_iommu *iommu = platform_get_drvdata(pdev);
11491a4e90f2SMarc Zyngier 
11501a4e90f2SMarc Zyngier 	/*
11511a4e90f2SMarc Zyngier 	 * Be careful not to try to shutdown an otherwise unused
11521a4e90f2SMarc Zyngier 	 * IOMMU, as it is likely not to be clocked, and accessing it
11531a4e90f2SMarc Zyngier 	 * would just block. An IOMMU without a domain is likely to be
11541a4e90f2SMarc Zyngier 	 * unused, so let's use this as a (weak) guard.
11551a4e90f2SMarc Zyngier 	 */
11561a4e90f2SMarc Zyngier 	if (iommu && iommu->domain) {
11571a4e90f2SMarc Zyngier 		rk_iommu_enable_stall(iommu);
11581a4e90f2SMarc Zyngier 		rk_iommu_disable_paging(iommu);
11591a4e90f2SMarc Zyngier 		rk_iommu_force_reset(iommu);
11601a4e90f2SMarc Zyngier 	}
11611a4e90f2SMarc Zyngier }
11621a4e90f2SMarc Zyngier 
1163c68a2921SDaniel Kurtz static const struct of_device_id rk_iommu_dt_ids[] = {
1164c68a2921SDaniel Kurtz 	{ .compatible = "rockchip,iommu" },
1165c68a2921SDaniel Kurtz 	{ /* sentinel */ }
1166c68a2921SDaniel Kurtz };
1167c68a2921SDaniel Kurtz MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
1168c68a2921SDaniel Kurtz 
1169c68a2921SDaniel Kurtz static struct platform_driver rk_iommu_driver = {
1170c68a2921SDaniel Kurtz 	.probe = rk_iommu_probe,
11711a4e90f2SMarc Zyngier 	.shutdown = rk_iommu_shutdown,
1172c68a2921SDaniel Kurtz 	.driver = {
1173c68a2921SDaniel Kurtz 		   .name = "rk_iommu",
1174d9e7eb15SArnd Bergmann 		   .of_match_table = rk_iommu_dt_ids,
117598b72b94SJeffy Chen 		   .suppress_bind_attrs = true,
1176c68a2921SDaniel Kurtz 	},
1177c68a2921SDaniel Kurtz };
1178c68a2921SDaniel Kurtz 
1179c68a2921SDaniel Kurtz static int __init rk_iommu_init(void)
1180c68a2921SDaniel Kurtz {
11819176a303SJeffy Chen 	return platform_driver_register(&rk_iommu_driver);
1182c68a2921SDaniel Kurtz }
1183c68a2921SDaniel Kurtz subsys_initcall(rk_iommu_init);
1184c68a2921SDaniel Kurtz 
11855fd577c3SJeffy Chen IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu");
11865fd577c3SJeffy Chen 
1187c68a2921SDaniel Kurtz MODULE_DESCRIPTION("IOMMU API for Rockchip");
1188c68a2921SDaniel Kurtz MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
1189c68a2921SDaniel Kurtz MODULE_ALIAS("platform:rockchip-iommu");
1190c68a2921SDaniel Kurtz MODULE_LICENSE("GPL v2");