xref: /linux/drivers/iommu/rockchip-iommu.c (revision 6d9ffaad7eddefaa0a166ba612665aef5264e352)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE
#define FORCE_RESET_TIMEOUT	100	/* ms */

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling; paging resumes */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 *   4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
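/* 0x007ff000 has bits [22:12] set: every power-of-2 size from 4 KiB to 4 MiB */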

#define IOMMU_REG_POLL_COUNT_FAST 1000

struct rk_iommu_domain {
	struct list_head iommus;
	struct platform_device *pdev;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	int *irq;
	int num_irq;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
};

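/*
 * The CPU updates page tables through the kernel's cached mapping while the
 * IOMMU fetches them from memory, so modified entries must be synced for the
 * device before the hardware walks them.
 */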
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* size of 'count' u32 entries */

	dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * Inspired by _wait_for in intel_drv.h
 * This is NOT safe for use in interrupt context.
 *
 * Note that it's important that we check the condition again after having
 * timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define rk_wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = (COND) ? 0 : -ETIMEDOUT;		\
			break;						\
		}							\
		usleep_range(50, 100);					\
	}								\
	ret__;								\
})
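
/*
 * Typical use, as in rk_iommu_enable_stall() below:
 *
 *   ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
 *
 * ret is 0 if COND became true within roughly MS milliseconds,
 * -ETIMEDOUT otherwise.
 */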

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KiB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KiB page (4 bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */
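
/*
 * For reference: 1024 DTEs x 1024 PTEs x 4 KiB pages covers the full
 * 4 GiB of 32-bit iova space (10 + 10 + 12 address bits).
 */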

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KiB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KiB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;

	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}
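
/*
 * Worked example (values are illustrative): rk_mk_pte(0x12345000,
 * IOMMU_READ | IOMMU_WRITE) yields 0x12345007 - page address 0x12345000
 * with the readable (bit 1), writable (bit 2) and valid (bit 0) flags set.
 */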

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0
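
/*
 * Decomposing an example (hypothetical) iova of 0x12345678:
 *   DTE index   = (0x12345678 & 0xffc00000) >> 22 = 0x048
 *   PTE index   = (0x12345678 & 0x003ff000) >> 12 = 0x345
 *   Page offset =  0x12345678 & 0x00000fff        = 0x678
 */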

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;

	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		/* Restart from iova_start for each MMU instance */
		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE,
				       iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that the upper 5 nybbles are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	for (i = 0; i < iommu->num_mmu; i++) {
		ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
				  FORCE_RESET_TIMEOUT);
		if (ret) {
			dev_err(iommu->dev, "FORCE_RESET command timed out\n");
			return ret;
		}
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap cache
			 * and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;

		iommu = list_entry(pos, struct rk_iommu, node);
		rk_iommu_zap_lines(iommu, iova, size);
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
					SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	struct device *dev = &rk_domain->pdev->dev;
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pt_dma)) {
		dev_err(dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct iommu_group *group;
	struct device *iommu_dev;
	struct rk_iommu *rk_iommu;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;
	iommu_dev = iommu_group_get_iommudata(group);
	rk_iommu = dev_get_drvdata(iommu_dev);
	iommu_group_put(group);

	return rk_iommu;
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret, i;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		return ret;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		return ret;

	iommu->domain = domain;

	for (i = 0; i < iommu->num_irq; i++) {
		ret = devm_request_irq(iommu->dev, iommu->irq[i], rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (ret)
			return ret;
	}

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);
	if (ret)
		return ret;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	dev_dbg(dev, "Attached to iommu domain\n");

	rk_iommu_disable_stall(iommu);

	return 0;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int i;

	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* Ignore error while disabling, just keep going */
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);

	for (i = 0; i < iommu->num_irq; i++)
		devm_free_irq(iommu->dev, iommu->irq[i], iommu);

	iommu->domain = NULL;

	dev_dbg(dev, "Detached from iommu domain\n");
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;
	struct platform_device *pdev;
	struct device *iommu_dev;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	/*
	 * Register a pdev per domain, so the DMA API has a struct device to
	 * work with even when a virtual master has no iommu slave device.
	 */
	pdev = platform_device_register_simple("rk_iommu_domain",
					       PLATFORM_DEVID_AUTO, NULL, 0);
	if (IS_ERR(pdev))
		return NULL;

	rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		goto err_unreg_pdev;

	rk_domain->pdev = pdev;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		goto err_unreg_pdev;

	/*
	 * rk32xx iommus use a 2-level page table.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	iommu_dev = &pdev->dev;
	rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
		dev_err(iommu_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
err_unreg_pdev:
	platform_device_unregister(pdev);

	return NULL;
}
962c68a2921SDaniel Kurtz 
963bcd516a3SJoerg Roedel static void rk_iommu_domain_free(struct iommu_domain *domain)
964c68a2921SDaniel Kurtz {
965bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
966c68a2921SDaniel Kurtz 	int i;
967c68a2921SDaniel Kurtz 
968c68a2921SDaniel Kurtz 	WARN_ON(!list_empty(&rk_domain->iommus));
969c68a2921SDaniel Kurtz 
970c68a2921SDaniel Kurtz 	for (i = 0; i < NUM_DT_ENTRIES; i++) {
971c68a2921SDaniel Kurtz 		u32 dte = rk_domain->dt[i];
972c68a2921SDaniel Kurtz 		if (rk_dte_is_pt_valid(dte)) {
973c68a2921SDaniel Kurtz 			phys_addr_t pt_phys = rk_dte_pt_address(dte);
974c68a2921SDaniel Kurtz 			u32 *page_table = phys_to_virt(pt_phys);
9754f0aba67SShunqian Zheng 			dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
9764f0aba67SShunqian Zheng 					 SPAGE_SIZE, DMA_TO_DEVICE);
977c68a2921SDaniel Kurtz 			free_page((unsigned long)page_table);
978c68a2921SDaniel Kurtz 		}
979c68a2921SDaniel Kurtz 	}
980c68a2921SDaniel Kurtz 
9814f0aba67SShunqian Zheng 	dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
9824f0aba67SShunqian Zheng 			 SPAGE_SIZE, DMA_TO_DEVICE);
983c68a2921SDaniel Kurtz 	free_page((unsigned long)rk_domain->dt);
9844f0aba67SShunqian Zheng 
985a93db2f2SShunqian Zheng 	if (domain->type == IOMMU_DOMAIN_DMA)
9864f0aba67SShunqian Zheng 		iommu_put_dma_cookie(&rk_domain->domain);
9874f0aba67SShunqian Zheng 
9884f0aba67SShunqian Zheng 	platform_device_unregister(rk_domain->pdev);
989c68a2921SDaniel Kurtz }
990c68a2921SDaniel Kurtz 
991c68a2921SDaniel Kurtz static bool rk_iommu_is_dev_iommu_master(struct device *dev)
992c68a2921SDaniel Kurtz {
993c68a2921SDaniel Kurtz 	struct device_node *np = dev->of_node;
994c68a2921SDaniel Kurtz 	int ret;
995c68a2921SDaniel Kurtz 
996c68a2921SDaniel Kurtz 	/*
997c68a2921SDaniel Kurtz 	 * An iommu master has an iommus property containing a list of phandles
998c68a2921SDaniel Kurtz 	 * to iommu nodes, each with an #iommu-cells property with value 0.
999c68a2921SDaniel Kurtz 	 */
1000c68a2921SDaniel Kurtz 	ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
1001c68a2921SDaniel Kurtz 	return (ret > 0);
1002c68a2921SDaniel Kurtz }
1003c68a2921SDaniel Kurtz 
1004c68a2921SDaniel Kurtz static int rk_iommu_group_set_iommudata(struct iommu_group *group,
1005c68a2921SDaniel Kurtz 					struct device *dev)
1006c68a2921SDaniel Kurtz {
1007c68a2921SDaniel Kurtz 	struct device_node *np = dev->of_node;
1008c68a2921SDaniel Kurtz 	struct platform_device *pd;
1009c68a2921SDaniel Kurtz 	int ret;
1010c68a2921SDaniel Kurtz 	struct of_phandle_args args;
1011c68a2921SDaniel Kurtz 
1012c68a2921SDaniel Kurtz 	/*
1013c68a2921SDaniel Kurtz 	 * An iommu master has an iommus property containing a list of phandles
1014c68a2921SDaniel Kurtz 	 * to iommu nodes, each with an #iommu-cells property with value 0.
1015c68a2921SDaniel Kurtz 	 */
1016c68a2921SDaniel Kurtz 	ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
1017c68a2921SDaniel Kurtz 					 &args);
1018c68a2921SDaniel Kurtz 	if (ret) {
10196bd4f1c7SRob Herring 		dev_err(dev, "of_parse_phandle_with_args(%pOF) => %d\n",
10206bd4f1c7SRob Herring 			np, ret);
1021c68a2921SDaniel Kurtz 		return ret;
1022c68a2921SDaniel Kurtz 	}
1023c68a2921SDaniel Kurtz 	if (args.args_count != 0) {
10246bd4f1c7SRob Herring 		dev_err(dev, "incorrect number of iommu params found for %pOF (found %d, expected 0)\n",
10256bd4f1c7SRob Herring 			args.np, args.args_count);
1026c68a2921SDaniel Kurtz 		return -EINVAL;
1027c68a2921SDaniel Kurtz 	}
1028c68a2921SDaniel Kurtz 
1029c68a2921SDaniel Kurtz 	pd = of_find_device_by_node(args.np);
1030c68a2921SDaniel Kurtz 	of_node_put(args.np);
1031c68a2921SDaniel Kurtz 	if (!pd) {
10326bd4f1c7SRob Herring 		dev_err(dev, "iommu %pOF not found\n", args.np);
1033c68a2921SDaniel Kurtz 		return -EPROBE_DEFER;
1034c68a2921SDaniel Kurtz 	}
1035c68a2921SDaniel Kurtz 
1036c68a2921SDaniel Kurtz 	/* TODO(djkurtz): handle multiple slave iommus for a single master */
1037c68a2921SDaniel Kurtz 	iommu_group_set_iommudata(group, &pd->dev, NULL);
1038c68a2921SDaniel Kurtz 
1039c68a2921SDaniel Kurtz 	return 0;
1040c68a2921SDaniel Kurtz }
1041c68a2921SDaniel Kurtz 
1042c68a2921SDaniel Kurtz static int rk_iommu_add_device(struct device *dev)
1043c68a2921SDaniel Kurtz {
1044c68a2921SDaniel Kurtz 	struct iommu_group *group;
1045c9d9f239SJoerg Roedel 	struct rk_iommu *iommu;
1046c68a2921SDaniel Kurtz 	int ret;
1047c68a2921SDaniel Kurtz 
1048c68a2921SDaniel Kurtz 	if (!rk_iommu_is_dev_iommu_master(dev))
1049c68a2921SDaniel Kurtz 		return -ENODEV;
1050c68a2921SDaniel Kurtz 
1051c68a2921SDaniel Kurtz 	group = iommu_group_get(dev);
1052c68a2921SDaniel Kurtz 	if (!group) {
1053c68a2921SDaniel Kurtz 		group = iommu_group_alloc();
1054c68a2921SDaniel Kurtz 		if (IS_ERR(group)) {
1055c68a2921SDaniel Kurtz 			dev_err(dev, "Failed to allocate IOMMU group\n");
1056c68a2921SDaniel Kurtz 			return PTR_ERR(group);
1057c68a2921SDaniel Kurtz 		}
1058c68a2921SDaniel Kurtz 	}
1059c68a2921SDaniel Kurtz 
1060c68a2921SDaniel Kurtz 	ret = iommu_group_add_device(group, dev);
1061c68a2921SDaniel Kurtz 	if (ret)
1062c68a2921SDaniel Kurtz 		goto err_put_group;
1063c68a2921SDaniel Kurtz 
1064c68a2921SDaniel Kurtz 	ret = rk_iommu_group_set_iommudata(group, dev);
1065c68a2921SDaniel Kurtz 	if (ret)
1066c68a2921SDaniel Kurtz 		goto err_remove_device;
1067c68a2921SDaniel Kurtz 
1068c9d9f239SJoerg Roedel 	iommu = rk_iommu_from_dev(dev);
1069c9d9f239SJoerg Roedel 	if (iommu)
1070c9d9f239SJoerg Roedel 		iommu_device_link(&iommu->iommu, dev);
1071c9d9f239SJoerg Roedel 
1072c68a2921SDaniel Kurtz 	iommu_group_put(group);
1073c68a2921SDaniel Kurtz 
1074c68a2921SDaniel Kurtz 	return 0;
1075c68a2921SDaniel Kurtz 
1076c68a2921SDaniel Kurtz err_remove_device:
1077c68a2921SDaniel Kurtz 	iommu_group_remove_device(dev);
1078c68a2921SDaniel Kurtz err_put_group:
1079c68a2921SDaniel Kurtz 	iommu_group_put(group);
1080c68a2921SDaniel Kurtz 	return ret;
1081c68a2921SDaniel Kurtz }
1082c68a2921SDaniel Kurtz 
1083c68a2921SDaniel Kurtz static void rk_iommu_remove_device(struct device *dev)
1084c68a2921SDaniel Kurtz {
1085c9d9f239SJoerg Roedel 	struct rk_iommu *iommu;
1086c9d9f239SJoerg Roedel 
1087c68a2921SDaniel Kurtz 	if (!rk_iommu_is_dev_iommu_master(dev))
1088c68a2921SDaniel Kurtz 		return;
1089c68a2921SDaniel Kurtz 
1090c9d9f239SJoerg Roedel 	iommu = rk_iommu_from_dev(dev);
1091c9d9f239SJoerg Roedel 	if (iommu)
1092c9d9f239SJoerg Roedel 		iommu_device_unlink(&iommu->iommu, dev);
1093c9d9f239SJoerg Roedel 
1094c68a2921SDaniel Kurtz 	iommu_group_remove_device(dev);
1095c68a2921SDaniel Kurtz }
1096c68a2921SDaniel Kurtz 
1097c68a2921SDaniel Kurtz static const struct iommu_ops rk_iommu_ops = {
1098bcd516a3SJoerg Roedel 	.domain_alloc = rk_iommu_domain_alloc,
1099bcd516a3SJoerg Roedel 	.domain_free = rk_iommu_domain_free,
1100c68a2921SDaniel Kurtz 	.attach_dev = rk_iommu_attach_device,
1101c68a2921SDaniel Kurtz 	.detach_dev = rk_iommu_detach_device,
1102c68a2921SDaniel Kurtz 	.map = rk_iommu_map,
1103c68a2921SDaniel Kurtz 	.unmap = rk_iommu_unmap,
1104e6d0f473SSimon Xue 	.map_sg = default_iommu_map_sg,
1105c68a2921SDaniel Kurtz 	.add_device = rk_iommu_add_device,
1106c68a2921SDaniel Kurtz 	.remove_device = rk_iommu_remove_device,
1107c68a2921SDaniel Kurtz 	.iova_to_phys = rk_iommu_iova_to_phys,
1108c68a2921SDaniel Kurtz 	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
1109c68a2921SDaniel Kurtz };
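
/*
 * Hedged usage sketch, not part of this driver: a master's driver reaches
 * the ops above only through the generic IOMMU API. Roughly (error
 * handling elided; iova, paddr, size and prot values are arbitrary
 * examples):
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);  calls domain_alloc
 *	iommu_attach_device(domain, dev);                 calls attach_dev
 *	iommu_map(domain, iova, paddr, SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);              calls map
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);                 calls unmap
 *	iommu_detach_device(domain, dev);                 calls detach_dev
 *	iommu_domain_free(domain);                        calls domain_free
 */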
1110c68a2921SDaniel Kurtz 
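/*
 * "rk_iommu_domain" is a virtual platform device (presumably registered
 * per domain by rk_iommu_domain_alloc earlier in this file); it exists
 * only so the DMA API has a struct device with sane 32-bit parameters to
 * allocate and map page tables against.
 */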
11114f0aba67SShunqian Zheng static int rk_iommu_domain_probe(struct platform_device *pdev)
11124f0aba67SShunqian Zheng {
11134f0aba67SShunqian Zheng 	struct device *dev = &pdev->dev;
11144f0aba67SShunqian Zheng 
11154f0aba67SShunqian Zheng 	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
11164f0aba67SShunqian Zheng 	if (!dev->dma_parms)
11174f0aba67SShunqian Zheng 		return -ENOMEM;
11184f0aba67SShunqian Zheng 
11194f0aba67SShunqian Zheng 	/* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
11204f0aba67SShunqian Zheng 	arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);
11214f0aba67SShunqian Zheng 
11224f0aba67SShunqian Zheng 	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
11234f0aba67SShunqian Zheng 	dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
11244f0aba67SShunqian Zheng 
11254f0aba67SShunqian Zheng 	return 0;
11264f0aba67SShunqian Zheng }
11274f0aba67SShunqian Zheng 
11284f0aba67SShunqian Zheng static struct platform_driver rk_iommu_domain_driver = {
11294f0aba67SShunqian Zheng 	.probe = rk_iommu_domain_probe,
11304f0aba67SShunqian Zheng 	.driver = {
11314f0aba67SShunqian Zheng 		   .name = "rk_iommu_domain",
11324f0aba67SShunqian Zheng 	},
11334f0aba67SShunqian Zheng };
11344f0aba67SShunqian Zheng 
1135c68a2921SDaniel Kurtz static int rk_iommu_probe(struct platform_device *pdev)
1136c68a2921SDaniel Kurtz {
1137c68a2921SDaniel Kurtz 	struct device *dev = &pdev->dev;
1138c68a2921SDaniel Kurtz 	struct rk_iommu *iommu;
1139c68a2921SDaniel Kurtz 	struct resource *res;
11403d08f434SShunqian Zheng 	int num_res = pdev->num_resources;
1141c9d9f239SJoerg Roedel 	int err, i;
1142c68a2921SDaniel Kurtz 
1143c68a2921SDaniel Kurtz 	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
1144c68a2921SDaniel Kurtz 	if (!iommu)
1145c68a2921SDaniel Kurtz 		return -ENOMEM;
1146c68a2921SDaniel Kurtz 
1147c68a2921SDaniel Kurtz 	platform_set_drvdata(pdev, iommu);
1148c68a2921SDaniel Kurtz 	iommu->dev = dev;
1149cd6438c5SZhengShunQian 	iommu->num_mmu = 0;
11503d08f434SShunqian Zheng 
11513d08f434SShunqian Zheng 	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
1152cd6438c5ZhengShunQian 				    GFP_KERNEL);
1153cd6438c5SZhengShunQian 	if (!iommu->bases)
1154cd6438c5SZhengShunQian 		return -ENOMEM;
1155c68a2921SDaniel Kurtz 
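	/*
	 * Map every MEM resource we were given; a bank that is absent or
	 * fails to map is skipped, so num_mmu ends up counting only the
	 * register banks we can actually drive.
	 */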
11563d08f434SShunqian Zheng 	for (i = 0; i < num_res; i++) {
1157cd6438c5SZhengShunQian 		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
11588d7f2d84STomeu Vizoso 		if (!res)
11598d7f2d84STomeu Vizoso 			continue;
1160cd6438c5SZhengShunQian 		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
1161cd6438c5SZhengShunQian 		if (IS_ERR(iommu->bases[i]))
1162cd6438c5SZhengShunQian 			continue;
1163cd6438c5SZhengShunQian 		iommu->num_mmu++;
1164cd6438c5SZhengShunQian 	}
1165cd6438c5SZhengShunQian 	if (iommu->num_mmu == 0)
1166cd6438c5SZhengShunQian 		return PTR_ERR(iommu->bases[0]);
1167c68a2921SDaniel Kurtz 
116803f732f8SSimon Xue 	iommu->num_irq = platform_irq_count(pdev);
116903f732f8SSimon Xue 	if (iommu->num_irq < 0)
117003f732f8SSimon Xue 		return iommu->num_irq;
117103f732f8SSimon Xue 	if (iommu->num_irq == 0)
1172c68a2921SDaniel Kurtz 		return -ENXIO;
117303f732f8SSimon Xue 
117403f732f8SSimon Xue 	iommu->irq = devm_kcalloc(dev, iommu->num_irq, sizeof(*iommu->irq),
117503f732f8SSimon Xue 				  GFP_KERNEL);
117603f732f8SSimon Xue 	if (!iommu->irq)
117703f732f8SSimon Xue 		return -ENOMEM;
117803f732f8SSimon Xue 
117903f732f8SSimon Xue 	for (i = 0; i < iommu->num_irq; i++) {
118003f732f8SSimon Xue 		iommu->irq[i] = platform_get_irq(pdev, i);
118103f732f8SSimon Xue 		if (iommu->irq[i] < 0) {
118203f732f8SSimon Xue 			dev_err(dev, "Failed to get IRQ %d: %d\n", i, iommu->irq[i]);
1183c68a2921SDaniel Kurtz 			return iommu->irq[i];
1184c68a2921SDaniel Kurtz 		}
1185c68a2921SDaniel Kurtz 	}
1186c68a2921SDaniel Kurtz 
1187c3aa4742SSimon Xue 	iommu->reset_disabled = device_property_read_bool(dev,
1188c3aa4742SSimon Xue 					"rockchip,disable-mmu-reset");
1189c68a2921SDaniel Kurtz 
1190c9d9f239SJoerg Roedel 	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
1191c9d9f239SJoerg Roedel 	if (err)
1192c9d9f239SJoerg Roedel 		return err;
1193c9d9f239SJoerg Roedel 
1194c9d9f239SJoerg Roedel 	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
1195c9d9f239SJoerg Roedel 	err = iommu_device_register(&iommu->iommu);
1196*6d9ffaadSJeffy Chen 	if (err)
1197*6d9ffaadSJeffy Chen 		iommu_device_sysfs_remove(&iommu->iommu);
1198c9d9f239SJoerg Roedel 
1199c9d9f239SJoerg Roedel 	return err;
1200c68a2921SDaniel Kurtz }
1201c68a2921SDaniel Kurtz 
12021a4e90f2SMarc Zyngier static void rk_iommu_shutdown(struct platform_device *pdev)
12031a4e90f2SMarc Zyngier {
12041a4e90f2SMarc Zyngier 	struct rk_iommu *iommu = platform_get_drvdata(pdev);
12051a4e90f2SMarc Zyngier 
12061a4e90f2SMarc Zyngier 	/*
12071a4e90f2SMarc Zyngier 	 * Be careful not to try to shut down an otherwise unused
12081a4e90f2SMarc Zyngier 	 * IOMMU, as it is likely not to be clocked, and accessing it
12091a4e90f2SMarc Zyngier 	 * would just block. An IOMMU without a domain is likely to be
12101a4e90f2SMarc Zyngier 	 * unused, so let's use this as a (weak) guard.
12111a4e90f2SMarc Zyngier 	 */
12121a4e90f2SMarc Zyngier 	if (iommu && iommu->domain) {
12131a4e90f2SMarc Zyngier 		rk_iommu_enable_stall(iommu);
12141a4e90f2SMarc Zyngier 		rk_iommu_disable_paging(iommu);
12151a4e90f2SMarc Zyngier 		rk_iommu_force_reset(iommu);
12161a4e90f2SMarc Zyngier 	}
12171a4e90f2SMarc Zyngier }
12181a4e90f2SMarc Zyngier 
1219c68a2921SDaniel Kurtz static const struct of_device_id rk_iommu_dt_ids[] = {
1220c68a2921SDaniel Kurtz 	{ .compatible = "rockchip,iommu" },
1221c68a2921SDaniel Kurtz 	{ /* sentinel */ }
1222c68a2921SDaniel Kurtz };
1223c68a2921SDaniel Kurtz MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
1224c68a2921SDaniel Kurtz 
1225c68a2921SDaniel Kurtz static struct platform_driver rk_iommu_driver = {
1226c68a2921SDaniel Kurtz 	.probe = rk_iommu_probe,
12271a4e90f2SMarc Zyngier 	.shutdown = rk_iommu_shutdown,
1228c68a2921SDaniel Kurtz 	.driver = {
1229c68a2921SDaniel Kurtz 		   .name = "rk_iommu",
1230d9e7eb15SArnd Bergmann 		   .of_match_table = rk_iommu_dt_ids,
123198b72b94SJeffy Chen 		   .suppress_bind_attrs = true,
1232c68a2921SDaniel Kurtz 	},
1233c68a2921SDaniel Kurtz };
1234c68a2921SDaniel Kurtz 
1235c68a2921SDaniel Kurtz static int __init rk_iommu_init(void)
1236c68a2921SDaniel Kurtz {
1237425061b0SThierry Reding 	struct device_node *np;
1238c68a2921SDaniel Kurtz 	int ret;
1239c68a2921SDaniel Kurtz 
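	/*
	 * Bail out quietly on systems without a Rockchip IOMMU node, so a
	 * multi-platform kernel never installs rk_iommu_ops on the platform
	 * bus of unrelated machines.
	 */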
1240425061b0SThierry Reding 	np = of_find_matching_node(NULL, rk_iommu_dt_ids);
1241425061b0SThierry Reding 	if (!np)
1242425061b0SThierry Reding 		return 0;
1243425061b0SThierry Reding 
1244425061b0SThierry Reding 	of_node_put(np);
1245425061b0SThierry Reding 
1246c68a2921SDaniel Kurtz 	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
1247c68a2921SDaniel Kurtz 	if (ret)
1248c68a2921SDaniel Kurtz 		return ret;
1249c68a2921SDaniel Kurtz 
12504f0aba67SShunqian Zheng 	ret = platform_driver_register(&rk_iommu_domain_driver);
12514f0aba67SShunqian Zheng 	if (ret)
12524f0aba67SShunqian Zheng 		return ret;
12534f0aba67SShunqian Zheng 
12544f0aba67SShunqian Zheng 	ret = platform_driver_register(&rk_iommu_driver);
12554f0aba67SShunqian Zheng 	if (ret)
12564f0aba67SShunqian Zheng 		platform_driver_unregister(&rk_iommu_domain_driver);
12574f0aba67SShunqian Zheng 	return ret;
1258c68a2921SDaniel Kurtz }
1259c68a2921SDaniel Kurtz subsys_initcall(rk_iommu_init);
1260c68a2921SDaniel Kurtz 
1261c68a2921SDaniel Kurtz MODULE_DESCRIPTION("IOMMU API for Rockchip");
1262c68a2921SDaniel Kurtz MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
1263c68a2921SDaniel Kurtz MODULE_ALIAS("platform:rockchip-iommu");
1264c68a2921SDaniel Kurtz MODULE_LICENSE("GPL v2");
1265