xref: /linux/drivers/iommu/rockchip-iommu.c (revision 461a6946b1f93f6720577fb06aa78e8cbd9291c9)
1c68a2921SDaniel Kurtz /*
2c68a2921SDaniel Kurtz  * This program is free software; you can redistribute it and/or modify
3c68a2921SDaniel Kurtz  * it under the terms of the GNU General Public License version 2 as
4c68a2921SDaniel Kurtz  * published by the Free Software Foundation.
5c68a2921SDaniel Kurtz  */
6c68a2921SDaniel Kurtz 
7c68a2921SDaniel Kurtz #include <linux/compiler.h>
8c68a2921SDaniel Kurtz #include <linux/delay.h>
9c68a2921SDaniel Kurtz #include <linux/device.h>
104f0aba67SShunqian Zheng #include <linux/dma-iommu.h>
11*461a6946SJoerg Roedel #include <linux/dma-mapping.h>
12c68a2921SDaniel Kurtz #include <linux/errno.h>
13c68a2921SDaniel Kurtz #include <linux/interrupt.h>
14c68a2921SDaniel Kurtz #include <linux/io.h>
15c68a2921SDaniel Kurtz #include <linux/iommu.h>
16c68a2921SDaniel Kurtz #include <linux/jiffies.h>
17c68a2921SDaniel Kurtz #include <linux/list.h>
18c68a2921SDaniel Kurtz #include <linux/mm.h>
19c68a2921SDaniel Kurtz #include <linux/module.h>
20c68a2921SDaniel Kurtz #include <linux/of.h>
21c68a2921SDaniel Kurtz #include <linux/of_platform.h>
22c68a2921SDaniel Kurtz #include <linux/platform_device.h>
23c68a2921SDaniel Kurtz #include <linux/slab.h>
24c68a2921SDaniel Kurtz #include <linux/spinlock.h>
25c68a2921SDaniel Kurtz 
26c68a2921SDaniel Kurtz /* MMU register offsets */
27c68a2921SDaniel Kurtz #define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
28c68a2921SDaniel Kurtz #define RK_MMU_STATUS		0x04
29c68a2921SDaniel Kurtz #define RK_MMU_COMMAND		0x08
30c68a2921SDaniel Kurtz #define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
31c68a2921SDaniel Kurtz #define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
32c68a2921SDaniel Kurtz #define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
33c68a2921SDaniel Kurtz #define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
34c68a2921SDaniel Kurtz #define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
35c68a2921SDaniel Kurtz #define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
36c68a2921SDaniel Kurtz #define RK_MMU_AUTO_GATING	0x24
37c68a2921SDaniel Kurtz 
38c68a2921SDaniel Kurtz #define DTE_ADDR_DUMMY		0xCAFEBABE
39c68a2921SDaniel Kurtz #define FORCE_RESET_TIMEOUT	100	/* ms */
40c68a2921SDaniel Kurtz 
41c68a2921SDaniel Kurtz /* RK_MMU_STATUS fields */
42c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
43c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
44c68a2921SDaniel Kurtz #define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
45c68a2921SDaniel Kurtz #define RK_MMU_STATUS_IDLE                 BIT(3)
46c68a2921SDaniel Kurtz #define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
47c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
48c68a2921SDaniel Kurtz #define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)
49c68a2921SDaniel Kurtz 
50c68a2921SDaniel Kurtz /* RK_MMU_COMMAND command values */
51c68a2921SDaniel Kurtz #define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
52c68a2921SDaniel Kurtz #define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
53c68a2921SDaniel Kurtz #define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
54c68a2921SDaniel Kurtz #define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling; re-enables paging */
55c68a2921SDaniel Kurtz #define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
56c68a2921SDaniel Kurtz #define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
57c68a2921SDaniel Kurtz #define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */
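
/*
 * A minimal sketch (illustrative only; it mirrors what
 * rk_iommu_attach_device() below actually does) of how these commands
 * combine to reprogram a live MMU: stall it, swap the tables, flush the
 * IOTLB, then resume translation.
 *
 *	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
 *	rk_iommu_write(base, RK_MMU_DTE_ADDR, dt_dma);
 *	rk_iommu_base_command(base, RK_MMU_CMD_ZAP_CACHE);
 *	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
 */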
58c68a2921SDaniel Kurtz 
59c68a2921SDaniel Kurtz /* RK_MMU_INT_* register fields */
60c68a2921SDaniel Kurtz #define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
61c68a2921SDaniel Kurtz #define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
62c68a2921SDaniel Kurtz #define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
63c68a2921SDaniel Kurtz 
64c68a2921SDaniel Kurtz #define NUM_DT_ENTRIES 1024
65c68a2921SDaniel Kurtz #define NUM_PT_ENTRIES 1024
66c68a2921SDaniel Kurtz 
67c68a2921SDaniel Kurtz #define SPAGE_ORDER 12
68c68a2921SDaniel Kurtz #define SPAGE_SIZE (1 << SPAGE_ORDER)
69c68a2921SDaniel Kurtz 
70c68a2921SDaniel Kurtz /*
71c68a2921SDaniel Kurtz  * Support mapping any size that fits in one page table:
72c68a2921SDaniel Kurtz  *   4 KiB to 4 MiB (bits [22:12] of the bitmap below, i.e. every
72c68a2921SDaniel Kurtz  *   power-of-two size from 2^12 to 2^22)
73c68a2921SDaniel Kurtz  */
74c68a2921SDaniel Kurtz #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
75c68a2921SDaniel Kurtz 
76c68a2921SDaniel Kurtz #define IOMMU_REG_POLL_COUNT_FAST 1000
77c68a2921SDaniel Kurtz 
78c68a2921SDaniel Kurtz struct rk_iommu_domain {
79c68a2921SDaniel Kurtz 	struct list_head iommus;
804f0aba67SShunqian Zheng 	struct platform_device *pdev;
81c68a2921SDaniel Kurtz 	u32 *dt; /* page directory table */
824f0aba67SShunqian Zheng 	dma_addr_t dt_dma;
83c68a2921SDaniel Kurtz 	spinlock_t iommus_lock; /* lock for iommus list */
84c68a2921SDaniel Kurtz 	spinlock_t dt_lock; /* lock for modifying page directory table */
85bcd516a3SJoerg Roedel 
86bcd516a3SJoerg Roedel 	struct iommu_domain domain;
87c68a2921SDaniel Kurtz };
88c68a2921SDaniel Kurtz 
89c68a2921SDaniel Kurtz struct rk_iommu {
90c68a2921SDaniel Kurtz 	struct device *dev;
91cd6438c5SZhengShunQian 	void __iomem **bases;
92cd6438c5SZhengShunQian 	int num_mmu;
93c68a2921SDaniel Kurtz 	int irq;
94c68a2921SDaniel Kurtz 	struct list_head node; /* entry in rk_iommu_domain.iommus */
95c68a2921SDaniel Kurtz 	struct iommu_domain *domain; /* domain to which iommu is attached */
96c68a2921SDaniel Kurtz };
97c68a2921SDaniel Kurtz 
984f0aba67SShunqian Zheng static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
994f0aba67SShunqian Zheng 				  unsigned int count)
100c68a2921SDaniel Kurtz {
1014f0aba67SShunqian Zheng 	size_t size = count * sizeof(u32); /* count is in u32 entries */
102c68a2921SDaniel Kurtz 
1034f0aba67SShunqian Zheng 	dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
104c68a2921SDaniel Kurtz }
105c68a2921SDaniel Kurtz 
106bcd516a3SJoerg Roedel static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
107bcd516a3SJoerg Roedel {
108bcd516a3SJoerg Roedel 	return container_of(dom, struct rk_iommu_domain, domain);
109bcd516a3SJoerg Roedel }
110bcd516a3SJoerg Roedel 
111c68a2921SDaniel Kurtz /*
112c68a2921SDaniel Kurtz  * Inspired by _wait_for in intel_drv.h
113c68a2921SDaniel Kurtz  * This is NOT safe for use in interrupt context.
114c68a2921SDaniel Kurtz  *
115c68a2921SDaniel Kurtz  * Note that it's important that we check the condition again after having
116c68a2921SDaniel Kurtz  * timed out, since the timeout could be due to preemption or similar and
117c68a2921SDaniel Kurtz  * we may never have had a chance to check the condition before the timeout.
118c68a2921SDaniel Kurtz  */
119c68a2921SDaniel Kurtz #define rk_wait_for(COND, MS) ({ \
120c68a2921SDaniel Kurtz 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
121c68a2921SDaniel Kurtz 	int ret__ = 0;							\
122c68a2921SDaniel Kurtz 	while (!(COND)) {						\
123c68a2921SDaniel Kurtz 		if (time_after(jiffies, timeout__)) {			\
124c68a2921SDaniel Kurtz 			ret__ = (COND) ? 0 : -ETIMEDOUT;		\
125c68a2921SDaniel Kurtz 			break;						\
126c68a2921SDaniel Kurtz 		}							\
127c68a2921SDaniel Kurtz 		usleep_range(50, 100);					\
128c68a2921SDaniel Kurtz 	}								\
129c68a2921SDaniel Kurtz 	ret__;								\
130c68a2921SDaniel Kurtz })
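
/*
 * Example usage (a sketch; it matches the call made in
 * rk_iommu_enable_stall() below): poll for up to 1 ms and report a
 * timeout if the stall never asserts.
 *
 *	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
 *	if (ret)
 *		dev_err(iommu->dev, "stall request timed out\n");
 */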
131c68a2921SDaniel Kurtz 
132c68a2921SDaniel Kurtz /*
133c68a2921SDaniel Kurtz  * The Rockchip rk3288 iommu uses a 2-level page table.
134c68a2921SDaniel Kurtz  * The first level is the "Directory Table" (DT).
135c68a2921SDaniel Kurtz  * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
136c68a2921SDaniel Kurtz  * to a "Page Table".
137c68a2921SDaniel Kurtz  * The second level consists of up to 1024 Page Tables (PTs).
138c68a2921SDaniel Kurtz  * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
139c68a2921SDaniel Kurtz  * a 4 KB page of physical memory.
140c68a2921SDaniel Kurtz  *
141c68a2921SDaniel Kurtz  * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
142c68a2921SDaniel Kurtz  * Each iommu device has a MMU_DTE_ADDR register that contains the physical
143c68a2921SDaniel Kurtz  * address of the start of the DT page.
144c68a2921SDaniel Kurtz  *
145c68a2921SDaniel Kurtz  * The structure of the page table is as follows:
146c68a2921SDaniel Kurtz  *
147c68a2921SDaniel Kurtz  *                   DT
148c68a2921SDaniel Kurtz  * MMU_DTE_ADDR -> +-----+
149c68a2921SDaniel Kurtz  *                 |     |
150c68a2921SDaniel Kurtz  *                 +-----+     PT
151c68a2921SDaniel Kurtz  *                 | DTE | -> +-----+
152c68a2921SDaniel Kurtz  *                 +-----+    |     |     Memory
153c68a2921SDaniel Kurtz  *                 |     |    +-----+     Page
154c68a2921SDaniel Kurtz  *                 |     |    | PTE | -> +-----+
155c68a2921SDaniel Kurtz  *                 +-----+    +-----+    |     |
156c68a2921SDaniel Kurtz  *                            |     |    |     |
157c68a2921SDaniel Kurtz  *                            |     |    |     |
158c68a2921SDaniel Kurtz  *                            +-----+    |     |
159c68a2921SDaniel Kurtz  *                                       |     |
160c68a2921SDaniel Kurtz  *                                       |     |
161c68a2921SDaniel Kurtz  *                                       +-----+
162c68a2921SDaniel Kurtz  */
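
/*
 * A compact sketch of the walk this layout implies (illustrative
 * pseudo-code; validity checks omitted, the helpers below implement
 * each step):
 *
 *	dte  = dt[iova >> 22];                        // DT lookup
 *	pt   = phys_to_virt(dte & 0xfffff000);        // follow DTE to PT
 *	pte  = pt[(iova >> 12) & 0x3ff];              // PT lookup
 *	phys = (pte & 0xfffff000) | (iova & 0xfff);   // final address
 */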
163c68a2921SDaniel Kurtz 
164c68a2921SDaniel Kurtz /*
165c68a2921SDaniel Kurtz  * Each DTE has a PT address and a valid bit:
166c68a2921SDaniel Kurtz  * +---------------------+-----------+-+
167c68a2921SDaniel Kurtz  * | PT address          | Reserved  |V|
168c68a2921SDaniel Kurtz  * +---------------------+-----------+-+
169c68a2921SDaniel Kurtz  *  31:12 - PT address (PTs always start on a 4 KB boundary)
170c68a2921SDaniel Kurtz  *  11: 1 - Reserved
171c68a2921SDaniel Kurtz  *      0 - 1 if PT @ PT address is valid
172c68a2921SDaniel Kurtz  */
173c68a2921SDaniel Kurtz #define RK_DTE_PT_ADDRESS_MASK    0xfffff000
174c68a2921SDaniel Kurtz #define RK_DTE_PT_VALID           BIT(0)
175c68a2921SDaniel Kurtz 
176c68a2921SDaniel Kurtz static inline phys_addr_t rk_dte_pt_address(u32 dte)
177c68a2921SDaniel Kurtz {
178c68a2921SDaniel Kurtz 	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
179c68a2921SDaniel Kurtz }
180c68a2921SDaniel Kurtz 
181c68a2921SDaniel Kurtz static inline bool rk_dte_is_pt_valid(u32 dte)
182c68a2921SDaniel Kurtz {
183c68a2921SDaniel Kurtz 	return dte & RK_DTE_PT_VALID;
184c68a2921SDaniel Kurtz }
185c68a2921SDaniel Kurtz 
1864f0aba67SShunqian Zheng static inline u32 rk_mk_dte(dma_addr_t pt_dma)
187c68a2921SDaniel Kurtz {
1884f0aba67SShunqian Zheng 	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
189c68a2921SDaniel Kurtz }
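
/*
 * Round-trip example (illustrative value): rk_mk_dte(0x10002000) yields
 * 0x10002001; rk_dte_pt_address(0x10002001) recovers 0x10002000 and
 * rk_dte_is_pt_valid(0x10002001) is true.
 */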
190c68a2921SDaniel Kurtz 
191c68a2921SDaniel Kurtz /*
192c68a2921SDaniel Kurtz  * Each PTE has a Page address, some flags and a valid bit:
193c68a2921SDaniel Kurtz  * +---------------------+---+-------+-+
194c68a2921SDaniel Kurtz  * | Page address        |Rsv| Flags |V|
195c68a2921SDaniel Kurtz  * +---------------------+---+-------+-+
196c68a2921SDaniel Kurtz  *  31:12 - Page address (Pages always start on a 4 KB boundary)
197c68a2921SDaniel Kurtz  *  11: 9 - Reserved
198c68a2921SDaniel Kurtz  *   8: 1 - Flags
199c68a2921SDaniel Kurtz  *      8 - Read allocate - allocate cache space on read misses
200c68a2921SDaniel Kurtz  *      7 - Read cache - enable cache & prefetch of data
201c68a2921SDaniel Kurtz  *      6 - Write buffer - enable delaying writes on their way to memory
202c68a2921SDaniel Kurtz  *      5 - Write allocate - allocate cache space on write misses
203c68a2921SDaniel Kurtz  *      4 - Write cache - different writes can be merged together
204c68a2921SDaniel Kurtz  *      3 - Override cache attributes
205c68a2921SDaniel Kurtz  *          if 1, bits 4-8 control cache attributes
206c68a2921SDaniel Kurtz  *          if 0, the system bus defaults are used
207c68a2921SDaniel Kurtz  *      2 - Writable
208c68a2921SDaniel Kurtz  *      1 - Readable
209c68a2921SDaniel Kurtz  *      0 - 1 if Page @ Page address is valid
210c68a2921SDaniel Kurtz  */
211c68a2921SDaniel Kurtz #define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
212c68a2921SDaniel Kurtz #define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
213c68a2921SDaniel Kurtz #define RK_PTE_PAGE_WRITABLE      BIT(2)
214c68a2921SDaniel Kurtz #define RK_PTE_PAGE_READABLE      BIT(1)
215c68a2921SDaniel Kurtz #define RK_PTE_PAGE_VALID         BIT(0)
216c68a2921SDaniel Kurtz 
217c68a2921SDaniel Kurtz static inline phys_addr_t rk_pte_page_address(u32 pte)
218c68a2921SDaniel Kurtz {
219c68a2921SDaniel Kurtz 	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
220c68a2921SDaniel Kurtz }
221c68a2921SDaniel Kurtz 
222c68a2921SDaniel Kurtz static inline bool rk_pte_is_page_valid(u32 pte)
223c68a2921SDaniel Kurtz {
224c68a2921SDaniel Kurtz 	return pte & RK_PTE_PAGE_VALID;
225c68a2921SDaniel Kurtz }
226c68a2921SDaniel Kurtz 
227c68a2921SDaniel Kurtz /* TODO: set cache flags per prot IOMMU_CACHE */
228c68a2921SDaniel Kurtz static u32 rk_mk_pte(phys_addr_t page, int prot)
229c68a2921SDaniel Kurtz {
230c68a2921SDaniel Kurtz 	u32 flags = 0;
230c68a2921SDaniel Kurtz 
231c68a2921SDaniel Kurtz 	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
232c68a2921SDaniel Kurtz 	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
233c68a2921SDaniel Kurtz 	page &= RK_PTE_PAGE_ADDRESS_MASK;
234c68a2921SDaniel Kurtz 	return page | flags | RK_PTE_PAGE_VALID;
235c68a2921SDaniel Kurtz }
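
/*
 * Worked example (illustrative values):
 *	rk_mk_pte(0x08000000, IOMMU_READ | IOMMU_WRITE)
 *	  = 0x08000000 | RK_PTE_PAGE_READABLE | RK_PTE_PAGE_WRITABLE
 *	    | RK_PTE_PAGE_VALID
 *	  = 0x08000007
 */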
236c68a2921SDaniel Kurtz 
237c68a2921SDaniel Kurtz static u32 rk_mk_pte_invalid(u32 pte)
238c68a2921SDaniel Kurtz {
239c68a2921SDaniel Kurtz 	return pte & ~RK_PTE_PAGE_VALID;
240c68a2921SDaniel Kurtz }
241c68a2921SDaniel Kurtz 
242c68a2921SDaniel Kurtz /*
243c68a2921SDaniel Kurtz  * rk3288 iova (IOMMU Virtual Address) format
244c68a2921SDaniel Kurtz  *  31       22.21       12.11          0
245c68a2921SDaniel Kurtz  * +-----------+-----------+-------------+
246c68a2921SDaniel Kurtz  * | DTE index | PTE index | Page offset |
247c68a2921SDaniel Kurtz  * +-----------+-----------+-------------+
248c68a2921SDaniel Kurtz  *  31:22 - DTE index   - index of DTE in DT
249c68a2921SDaniel Kurtz  *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
250c68a2921SDaniel Kurtz  *  11: 0 - Page offset - offset into page @ PTE.page_address
251c68a2921SDaniel Kurtz  */
252c68a2921SDaniel Kurtz #define RK_IOVA_DTE_MASK    0xffc00000
253c68a2921SDaniel Kurtz #define RK_IOVA_DTE_SHIFT   22
254c68a2921SDaniel Kurtz #define RK_IOVA_PTE_MASK    0x003ff000
255c68a2921SDaniel Kurtz #define RK_IOVA_PTE_SHIFT   12
256c68a2921SDaniel Kurtz #define RK_IOVA_PAGE_MASK   0x00000fff
257c68a2921SDaniel Kurtz #define RK_IOVA_PAGE_SHIFT  0
258c68a2921SDaniel Kurtz 
259c68a2921SDaniel Kurtz static u32 rk_iova_dte_index(dma_addr_t iova)
260c68a2921SDaniel Kurtz {
261c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
262c68a2921SDaniel Kurtz }
263c68a2921SDaniel Kurtz 
264c68a2921SDaniel Kurtz static u32 rk_iova_pte_index(dma_addr_t iova)
265c68a2921SDaniel Kurtz {
266c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
267c68a2921SDaniel Kurtz }
268c68a2921SDaniel Kurtz 
269c68a2921SDaniel Kurtz static u32 rk_iova_page_offset(dma_addr_t iova)
270c68a2921SDaniel Kurtz {
271c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
272c68a2921SDaniel Kurtz }
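
/*
 * Worked example (illustrative value): for iova = 0x12345678,
 *	rk_iova_dte_index(iova)   = 0x048
 *	rk_iova_pte_index(iova)   = 0x345
 *	rk_iova_page_offset(iova) = 0x678
 */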
273c68a2921SDaniel Kurtz 
274cd6438c5SZhengShunQian static u32 rk_iommu_read(void __iomem *base, u32 offset)
275c68a2921SDaniel Kurtz {
276cd6438c5SZhengShunQian 	return readl(base + offset);
277c68a2921SDaniel Kurtz }
278c68a2921SDaniel Kurtz 
279cd6438c5SZhengShunQian static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
280c68a2921SDaniel Kurtz {
281cd6438c5SZhengShunQian 	writel(value, base + offset);
282c68a2921SDaniel Kurtz }
283c68a2921SDaniel Kurtz 
284c68a2921SDaniel Kurtz static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
285c68a2921SDaniel Kurtz {
286cd6438c5SZhengShunQian 	int i;
287cd6438c5SZhengShunQian 
288cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
289cd6438c5SZhengShunQian 		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
290c68a2921SDaniel Kurtz }
291c68a2921SDaniel Kurtz 
292cd6438c5SZhengShunQian static void rk_iommu_base_command(void __iomem *base, u32 command)
293cd6438c5SZhengShunQian {
294cd6438c5SZhengShunQian 	writel(command, base + RK_MMU_COMMAND);
295cd6438c5SZhengShunQian }
295cd6438c5SZhengShunQian 
296c68a2921SDaniel Kurtz static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
297c68a2921SDaniel Kurtz 			       size_t size)
298c68a2921SDaniel Kurtz {
299cd6438c5SZhengShunQian 	int i;
301c68a2921SDaniel Kurtz 	dma_addr_t iova_end = iova + size;
300cd6438c5SZhengShunQian 
302c68a2921SDaniel Kurtz 	/*
303c68a2921SDaniel Kurtz 	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
304c68a2921SDaniel Kurtz 	 * entire iotlb rather than iterate over individual iovas.
305c68a2921SDaniel Kurtz 	 */
306cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
307c68a2921SDaniel Kurtz 		for (; iova < iova_end; iova += SPAGE_SIZE)
308cd6438c5SZhengShunQian 			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
309c68a2921SDaniel Kurtz }
310c68a2921SDaniel Kurtz 
311c68a2921SDaniel Kurtz static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
312c68a2921SDaniel Kurtz {
313cd6438c5SZhengShunQian 	bool active = true;
314cd6438c5SZhengShunQian 	int i;
315cd6438c5SZhengShunQian 
316cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
317fbedd9b9SJohn Keeping 		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
318fbedd9b9SJohn Keeping 					   RK_MMU_STATUS_STALL_ACTIVE);
319cd6438c5SZhengShunQian 
320cd6438c5SZhengShunQian 	return active;
321c68a2921SDaniel Kurtz }
322c68a2921SDaniel Kurtz 
323c68a2921SDaniel Kurtz static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
324c68a2921SDaniel Kurtz {
325cd6438c5SZhengShunQian 	bool enable = true;
326cd6438c5SZhengShunQian 	int i;
327cd6438c5SZhengShunQian 
328cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
329fbedd9b9SJohn Keeping 		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
330fbedd9b9SJohn Keeping 					   RK_MMU_STATUS_PAGING_ENABLED);
331cd6438c5SZhengShunQian 
332cd6438c5SZhengShunQian 	return enable;
333c68a2921SDaniel Kurtz }
334c68a2921SDaniel Kurtz 
335c68a2921SDaniel Kurtz static int rk_iommu_enable_stall(struct rk_iommu *iommu)
336c68a2921SDaniel Kurtz {
337cd6438c5SZhengShunQian 	int ret, i;
338c68a2921SDaniel Kurtz 
339c68a2921SDaniel Kurtz 	if (rk_iommu_is_stall_active(iommu))
340c68a2921SDaniel Kurtz 		return 0;
341c68a2921SDaniel Kurtz 
342c68a2921SDaniel Kurtz 	/* Stall can only be enabled if paging is enabled */
343c68a2921SDaniel Kurtz 	if (!rk_iommu_is_paging_enabled(iommu))
344c68a2921SDaniel Kurtz 		return 0;
345c68a2921SDaniel Kurtz 
346c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
347c68a2921SDaniel Kurtz 
348c68a2921SDaniel Kurtz 	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
349c68a2921SDaniel Kurtz 	if (ret)
350cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
351c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
352cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
353c68a2921SDaniel Kurtz 
354c68a2921SDaniel Kurtz 	return ret;
355c68a2921SDaniel Kurtz }
356c68a2921SDaniel Kurtz 
357c68a2921SDaniel Kurtz static int rk_iommu_disable_stall(struct rk_iommu *iommu)
358c68a2921SDaniel Kurtz {
359cd6438c5SZhengShunQian 	int ret, i;
360c68a2921SDaniel Kurtz 
361c68a2921SDaniel Kurtz 	if (!rk_iommu_is_stall_active(iommu))
362c68a2921SDaniel Kurtz 		return 0;
363c68a2921SDaniel Kurtz 
364c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
365c68a2921SDaniel Kurtz 
366c68a2921SDaniel Kurtz 	ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
367c68a2921SDaniel Kurtz 	if (ret)
368cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
369c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
370cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
371c68a2921SDaniel Kurtz 
372c68a2921SDaniel Kurtz 	return ret;
373c68a2921SDaniel Kurtz }
374c68a2921SDaniel Kurtz 
375c68a2921SDaniel Kurtz static int rk_iommu_enable_paging(struct rk_iommu *iommu)
376c68a2921SDaniel Kurtz {
377cd6438c5SZhengShunQian 	int ret, i;
378c68a2921SDaniel Kurtz 
379c68a2921SDaniel Kurtz 	if (rk_iommu_is_paging_enabled(iommu))
380c68a2921SDaniel Kurtz 		return 0;
381c68a2921SDaniel Kurtz 
382c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
383c68a2921SDaniel Kurtz 
384c68a2921SDaniel Kurtz 	ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
385c68a2921SDaniel Kurtz 	if (ret)
386cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
387c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
388cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
389c68a2921SDaniel Kurtz 
390c68a2921SDaniel Kurtz 	return ret;
391c68a2921SDaniel Kurtz }
392c68a2921SDaniel Kurtz 
393c68a2921SDaniel Kurtz static int rk_iommu_disable_paging(struct rk_iommu *iommu)
394c68a2921SDaniel Kurtz {
395cd6438c5SZhengShunQian 	int ret, i;
396c68a2921SDaniel Kurtz 
397c68a2921SDaniel Kurtz 	if (!rk_iommu_is_paging_enabled(iommu))
398c68a2921SDaniel Kurtz 		return 0;
399c68a2921SDaniel Kurtz 
400c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
401c68a2921SDaniel Kurtz 
402c68a2921SDaniel Kurtz 	ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
403c68a2921SDaniel Kurtz 	if (ret)
404cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
405c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
406cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
407c68a2921SDaniel Kurtz 
408c68a2921SDaniel Kurtz 	return ret;
409c68a2921SDaniel Kurtz }
410c68a2921SDaniel Kurtz 
411c68a2921SDaniel Kurtz static int rk_iommu_force_reset(struct rk_iommu *iommu)
412c68a2921SDaniel Kurtz {
413cd6438c5SZhengShunQian 	int ret, i;
414c68a2921SDaniel Kurtz 	u32 dte_addr;
415c68a2921SDaniel Kurtz 
416c68a2921SDaniel Kurtz 	/*
417c68a2921SDaniel Kurtz 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
418c68a2921SDaniel Kurtz 	 * and verifying that the upper 5 nybbles are read back.
419c68a2921SDaniel Kurtz 	 */
420cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
421cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
422c68a2921SDaniel Kurtz 
423cd6438c5SZhengShunQian 		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
424c68a2921SDaniel Kurtz 		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
425c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
426c68a2921SDaniel Kurtz 			return -EFAULT;
427c68a2921SDaniel Kurtz 		}
428cd6438c5SZhengShunQian 	}
429c68a2921SDaniel Kurtz 
430c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
431c68a2921SDaniel Kurtz 
432cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
433cd6438c5SZhengShunQian 		ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
434c68a2921SDaniel Kurtz 				  FORCE_RESET_TIMEOUT);
435cd6438c5SZhengShunQian 		if (ret) {
436c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "FORCE_RESET command timed out\n");
437c68a2921SDaniel Kurtz 			return ret;
438c68a2921SDaniel Kurtz 		}
439cd6438c5SZhengShunQian 	}
440c68a2921SDaniel Kurtz 
441cd6438c5SZhengShunQian 	return 0;
442cd6438c5SZhengShunQian }
443cd6438c5SZhengShunQian 
444cd6438c5SZhengShunQian static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
445c68a2921SDaniel Kurtz {
446cd6438c5SZhengShunQian 	void __iomem *base = iommu->bases[index];
447c68a2921SDaniel Kurtz 	u32 dte_index, pte_index, page_offset;
448c68a2921SDaniel Kurtz 	u32 mmu_dte_addr;
449c68a2921SDaniel Kurtz 	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
450c68a2921SDaniel Kurtz 	u32 *dte_addr;
451c68a2921SDaniel Kurtz 	u32 dte;
452c68a2921SDaniel Kurtz 	phys_addr_t pte_addr_phys = 0;
453c68a2921SDaniel Kurtz 	u32 *pte_addr = NULL;
454c68a2921SDaniel Kurtz 	u32 pte = 0;
455c68a2921SDaniel Kurtz 	phys_addr_t page_addr_phys = 0;
456c68a2921SDaniel Kurtz 	u32 page_flags = 0;
457c68a2921SDaniel Kurtz 
458c68a2921SDaniel Kurtz 	dte_index = rk_iova_dte_index(iova);
459c68a2921SDaniel Kurtz 	pte_index = rk_iova_pte_index(iova);
460c68a2921SDaniel Kurtz 	page_offset = rk_iova_page_offset(iova);
461c68a2921SDaniel Kurtz 
462cd6438c5SZhengShunQian 	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
463c68a2921SDaniel Kurtz 	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
464c68a2921SDaniel Kurtz 
465c68a2921SDaniel Kurtz 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
466c68a2921SDaniel Kurtz 	dte_addr = phys_to_virt(dte_addr_phys);
467c68a2921SDaniel Kurtz 	dte = *dte_addr;
468c68a2921SDaniel Kurtz 
469c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte))
470c68a2921SDaniel Kurtz 		goto print_it;
471c68a2921SDaniel Kurtz 
472c68a2921SDaniel Kurtz 	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
473c68a2921SDaniel Kurtz 	pte_addr = phys_to_virt(pte_addr_phys);
474c68a2921SDaniel Kurtz 	pte = *pte_addr;
475c68a2921SDaniel Kurtz 
476c68a2921SDaniel Kurtz 	if (!rk_pte_is_page_valid(pte))
477c68a2921SDaniel Kurtz 		goto print_it;
478c68a2921SDaniel Kurtz 
479c68a2921SDaniel Kurtz 	page_addr_phys = rk_pte_page_address(pte) + page_offset;
480c68a2921SDaniel Kurtz 	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
481c68a2921SDaniel Kurtz 
482c68a2921SDaniel Kurtz print_it:
483c68a2921SDaniel Kurtz 	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
484c68a2921SDaniel Kurtz 		&iova, dte_index, pte_index, page_offset);
485c68a2921SDaniel Kurtz 	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
486c68a2921SDaniel Kurtz 		&mmu_dte_addr_phys, &dte_addr_phys, dte,
487c68a2921SDaniel Kurtz 		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
488c68a2921SDaniel Kurtz 		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
489c68a2921SDaniel Kurtz }
490c68a2921SDaniel Kurtz 
491c68a2921SDaniel Kurtz static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
492c68a2921SDaniel Kurtz {
493c68a2921SDaniel Kurtz 	struct rk_iommu *iommu = dev_id;
494c68a2921SDaniel Kurtz 	u32 status;
495c68a2921SDaniel Kurtz 	u32 int_status;
496c68a2921SDaniel Kurtz 	dma_addr_t iova;
497cd6438c5SZhengShunQian 	irqreturn_t ret = IRQ_NONE;
498cd6438c5SZhengShunQian 	int i;
499c68a2921SDaniel Kurtz 
500cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
501cd6438c5SZhengShunQian 		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
502c68a2921SDaniel Kurtz 		if (int_status == 0)
503cd6438c5SZhengShunQian 			continue;
504c68a2921SDaniel Kurtz 
505cd6438c5SZhengShunQian 		ret = IRQ_HANDLED;
506cd6438c5SZhengShunQian 		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
507c68a2921SDaniel Kurtz 
508c68a2921SDaniel Kurtz 		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
509c68a2921SDaniel Kurtz 			int flags;
510c68a2921SDaniel Kurtz 
511cd6438c5SZhengShunQian 			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
512c68a2921SDaniel Kurtz 			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
513c68a2921SDaniel Kurtz 					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
514c68a2921SDaniel Kurtz 
515c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
516c68a2921SDaniel Kurtz 				&iova,
517c68a2921SDaniel Kurtz 				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
518c68a2921SDaniel Kurtz 
519cd6438c5SZhengShunQian 			log_iova(iommu, i, iova);
520c68a2921SDaniel Kurtz 
521c68a2921SDaniel Kurtz 			/*
522c68a2921SDaniel Kurtz 			 * Report page fault to any installed handlers.
523c68a2921SDaniel Kurtz 			 * Ignore the return code, though, since we always zap cache
524c68a2921SDaniel Kurtz 			 * and clear the page fault anyway.
525c68a2921SDaniel Kurtz 			 */
526c68a2921SDaniel Kurtz 			if (iommu->domain)
527c68a2921SDaniel Kurtz 				report_iommu_fault(iommu->domain, iommu->dev, iova,
528c68a2921SDaniel Kurtz 						   flags);
529c68a2921SDaniel Kurtz 			else
530c68a2921SDaniel Kurtz 				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
531c68a2921SDaniel Kurtz 
532cd6438c5SZhengShunQian 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
533cd6438c5SZhengShunQian 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
534c68a2921SDaniel Kurtz 		}
535c68a2921SDaniel Kurtz 
536c68a2921SDaniel Kurtz 		if (int_status & RK_MMU_IRQ_BUS_ERROR)
537c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
538c68a2921SDaniel Kurtz 
539c68a2921SDaniel Kurtz 		if (int_status & ~RK_MMU_IRQ_MASK)
540c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
541c68a2921SDaniel Kurtz 				int_status);
542c68a2921SDaniel Kurtz 
543cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
544cd6438c5SZhengShunQian 	}
545c68a2921SDaniel Kurtz 
546cd6438c5SZhengShunQian 	return ret;
547c68a2921SDaniel Kurtz }
548c68a2921SDaniel Kurtz 
549c68a2921SDaniel Kurtz static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
550c68a2921SDaniel Kurtz 					 dma_addr_t iova)
551c68a2921SDaniel Kurtz {
552bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
553c68a2921SDaniel Kurtz 	unsigned long flags;
554c68a2921SDaniel Kurtz 	phys_addr_t pt_phys, phys = 0;
555c68a2921SDaniel Kurtz 	u32 dte, pte;
556c68a2921SDaniel Kurtz 	u32 *page_table;
557c68a2921SDaniel Kurtz 
558c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
559c68a2921SDaniel Kurtz 
560c68a2921SDaniel Kurtz 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
561c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte))
562c68a2921SDaniel Kurtz 		goto out;
563c68a2921SDaniel Kurtz 
564c68a2921SDaniel Kurtz 	pt_phys = rk_dte_pt_address(dte);
565c68a2921SDaniel Kurtz 	page_table = (u32 *)phys_to_virt(pt_phys);
566c68a2921SDaniel Kurtz 	pte = page_table[rk_iova_pte_index(iova)];
567c68a2921SDaniel Kurtz 	if (!rk_pte_is_page_valid(pte))
568c68a2921SDaniel Kurtz 		goto out;
569c68a2921SDaniel Kurtz 
570c68a2921SDaniel Kurtz 	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
571c68a2921SDaniel Kurtz out:
572c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
573c68a2921SDaniel Kurtz 
574c68a2921SDaniel Kurtz 	return phys;
575c68a2921SDaniel Kurtz }
576c68a2921SDaniel Kurtz 
577c68a2921SDaniel Kurtz static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
578c68a2921SDaniel Kurtz 			      dma_addr_t iova, size_t size)
579c68a2921SDaniel Kurtz {
580c68a2921SDaniel Kurtz 	struct list_head *pos;
581c68a2921SDaniel Kurtz 	unsigned long flags;
582c68a2921SDaniel Kurtz 
583c68a2921SDaniel Kurtz 	/* shootdown these iova from all iommus using this domain */
584c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
585c68a2921SDaniel Kurtz 	list_for_each(pos, &rk_domain->iommus) {
586c68a2921SDaniel Kurtz 		struct rk_iommu *iommu;
586c68a2921SDaniel Kurtz 
587c68a2921SDaniel Kurtz 		iommu = list_entry(pos, struct rk_iommu, node);
588c68a2921SDaniel Kurtz 		rk_iommu_zap_lines(iommu, iova, size);
589c68a2921SDaniel Kurtz 	}
590c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
591c68a2921SDaniel Kurtz }
592c68a2921SDaniel Kurtz 
593d4dd920cSTomasz Figa static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
594d4dd920cSTomasz Figa 					 dma_addr_t iova, size_t size)
595d4dd920cSTomasz Figa {
596d4dd920cSTomasz Figa 	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
597d4dd920cSTomasz Figa 	if (size > SPAGE_SIZE)
598d4dd920cSTomasz Figa 		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
599d4dd920cSTomasz Figa 					SPAGE_SIZE);
600d4dd920cSTomasz Figa }
601d4dd920cSTomasz Figa 
602c68a2921SDaniel Kurtz static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
603c68a2921SDaniel Kurtz 				  dma_addr_t iova)
604c68a2921SDaniel Kurtz {
6054f0aba67SShunqian Zheng 	struct device *dev = &rk_domain->pdev->dev;
606c68a2921SDaniel Kurtz 	u32 *page_table, *dte_addr;
6074f0aba67SShunqian Zheng 	u32 dte_index, dte;
608c68a2921SDaniel Kurtz 	phys_addr_t pt_phys;
6094f0aba67SShunqian Zheng 	dma_addr_t pt_dma;
610c68a2921SDaniel Kurtz 
611c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
612c68a2921SDaniel Kurtz 
6134f0aba67SShunqian Zheng 	dte_index = rk_iova_dte_index(iova);
6144f0aba67SShunqian Zheng 	dte_addr = &rk_domain->dt[dte_index];
615c68a2921SDaniel Kurtz 	dte = *dte_addr;
616c68a2921SDaniel Kurtz 	if (rk_dte_is_pt_valid(dte))
617c68a2921SDaniel Kurtz 		goto done;
618c68a2921SDaniel Kurtz 
619c68a2921SDaniel Kurtz 	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
620c68a2921SDaniel Kurtz 	if (!page_table)
621c68a2921SDaniel Kurtz 		return ERR_PTR(-ENOMEM);
622c68a2921SDaniel Kurtz 
6234f0aba67SShunqian Zheng 	pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
6244f0aba67SShunqian Zheng 	if (dma_mapping_error(dev, pt_dma)) {
6254f0aba67SShunqian Zheng 		dev_err(dev, "DMA mapping error while allocating page table\n");
6264f0aba67SShunqian Zheng 		free_page((unsigned long)page_table);
6274f0aba67SShunqian Zheng 		return ERR_PTR(-ENOMEM);
6284f0aba67SShunqian Zheng 	}
6294f0aba67SShunqian Zheng 
6304f0aba67SShunqian Zheng 	dte = rk_mk_dte(pt_dma);
631c68a2921SDaniel Kurtz 	*dte_addr = dte;
632c68a2921SDaniel Kurtz 
6334f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
6344f0aba67SShunqian Zheng 	rk_table_flush(rk_domain,
6354f0aba67SShunqian Zheng 		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
636c68a2921SDaniel Kurtz done:
637c68a2921SDaniel Kurtz 	pt_phys = rk_dte_pt_address(dte);
638c68a2921SDaniel Kurtz 	return (u32 *)phys_to_virt(pt_phys);
639c68a2921SDaniel Kurtz }
640c68a2921SDaniel Kurtz 
641c68a2921SDaniel Kurtz static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
6424f0aba67SShunqian Zheng 				  u32 *pte_addr, dma_addr_t pte_dma,
6434f0aba67SShunqian Zheng 				  size_t size)
644c68a2921SDaniel Kurtz {
645c68a2921SDaniel Kurtz 	unsigned int pte_count;
646c68a2921SDaniel Kurtz 	unsigned int pte_total = size / SPAGE_SIZE;
647c68a2921SDaniel Kurtz 
648c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
649c68a2921SDaniel Kurtz 
650c68a2921SDaniel Kurtz 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
651c68a2921SDaniel Kurtz 		u32 pte = pte_addr[pte_count];
651c68a2921SDaniel Kurtz 
652c68a2921SDaniel Kurtz 		if (!rk_pte_is_page_valid(pte))
653c68a2921SDaniel Kurtz 			break;
654c68a2921SDaniel Kurtz 
655c68a2921SDaniel Kurtz 		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
656c68a2921SDaniel Kurtz 	}
657c68a2921SDaniel Kurtz 
6584f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pte_dma, pte_count);
659c68a2921SDaniel Kurtz 
660c68a2921SDaniel Kurtz 	return pte_count * SPAGE_SIZE;
661c68a2921SDaniel Kurtz }
662c68a2921SDaniel Kurtz 
663c68a2921SDaniel Kurtz static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
6644f0aba67SShunqian Zheng 			     dma_addr_t pte_dma, dma_addr_t iova,
6654f0aba67SShunqian Zheng 			     phys_addr_t paddr, size_t size, int prot)
666c68a2921SDaniel Kurtz {
667c68a2921SDaniel Kurtz 	unsigned int pte_count;
668c68a2921SDaniel Kurtz 	unsigned int pte_total = size / SPAGE_SIZE;
669c68a2921SDaniel Kurtz 	phys_addr_t page_phys;
670c68a2921SDaniel Kurtz 
671c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
672c68a2921SDaniel Kurtz 
673c68a2921SDaniel Kurtz 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
674c68a2921SDaniel Kurtz 		u32 pte = pte_addr[pte_count];
675c68a2921SDaniel Kurtz 
676c68a2921SDaniel Kurtz 		if (rk_pte_is_page_valid(pte))
677c68a2921SDaniel Kurtz 			goto unwind;
678c68a2921SDaniel Kurtz 
679c68a2921SDaniel Kurtz 		pte_addr[pte_count] = rk_mk_pte(paddr, prot);
680c68a2921SDaniel Kurtz 
681c68a2921SDaniel Kurtz 		paddr += SPAGE_SIZE;
682c68a2921SDaniel Kurtz 	}
683c68a2921SDaniel Kurtz 
6844f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pte_dma, pte_total);
685c68a2921SDaniel Kurtz 
686d4dd920cSTomasz Figa 	/*
687d4dd920cSTomasz Figa 	 * Zap the first and last iova to evict from iotlb any previously
688d4dd920cSTomasz Figa 	 * mapped cachelines holding stale values for its dte and pte.
689d4dd920cSTomasz Figa 	 * We only zap the first and last iova, since only they could have
690d4dd920cSTomasz Figa 	 * dte or pte shared with an existing mapping.
691d4dd920cSTomasz Figa 	 */
692d4dd920cSTomasz Figa 	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
693d4dd920cSTomasz Figa 
694c68a2921SDaniel Kurtz 	return 0;
695c68a2921SDaniel Kurtz unwind:
696c68a2921SDaniel Kurtz 	/* Unmap the range of iovas that we just mapped */
6974f0aba67SShunqian Zheng 	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
6984f0aba67SShunqian Zheng 			    pte_count * SPAGE_SIZE);
699c68a2921SDaniel Kurtz 
700c68a2921SDaniel Kurtz 	iova += pte_count * SPAGE_SIZE;
701c68a2921SDaniel Kurtz 	page_phys = rk_pte_page_address(pte_addr[pte_count]);
702c68a2921SDaniel Kurtz 	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
703c68a2921SDaniel Kurtz 	       &iova, &page_phys, &paddr, prot);
704c68a2921SDaniel Kurtz 
705c68a2921SDaniel Kurtz 	return -EADDRINUSE;
706c68a2921SDaniel Kurtz }
707c68a2921SDaniel Kurtz 
708c68a2921SDaniel Kurtz static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
709c68a2921SDaniel Kurtz 			phys_addr_t paddr, size_t size, int prot)
710c68a2921SDaniel Kurtz {
711bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
712c68a2921SDaniel Kurtz 	unsigned long flags;
7134f0aba67SShunqian Zheng 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
714c68a2921SDaniel Kurtz 	u32 *page_table, *pte_addr;
7154f0aba67SShunqian Zheng 	u32 dte, pte_index;
716c68a2921SDaniel Kurtz 	int ret;
717c68a2921SDaniel Kurtz 
718c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
719c68a2921SDaniel Kurtz 
720c68a2921SDaniel Kurtz 	/*
721c68a2921SDaniel Kurtz 	 * pgsize_bitmap specifies iova sizes that fit in one page table
722c68a2921SDaniel Kurtz 	 * (1024 4-KiB pages = 4 MiB).
723c68a2921SDaniel Kurtz 	 * So, size always satisfies 4096 <= size <= 4194304.
724c68a2921SDaniel Kurtz 	 * Since iommu_map() guarantees that both iova and size will be
725c68a2921SDaniel Kurtz 	 * aligned, we will always only be mapping from a single dte here.
726c68a2921SDaniel Kurtz 	 */
727c68a2921SDaniel Kurtz 	page_table = rk_dte_get_page_table(rk_domain, iova);
728c68a2921SDaniel Kurtz 	if (IS_ERR(page_table)) {
729c68a2921SDaniel Kurtz 		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
730c68a2921SDaniel Kurtz 		return PTR_ERR(page_table);
731c68a2921SDaniel Kurtz 	}
732c68a2921SDaniel Kurtz 
7334f0aba67SShunqian Zheng 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
7344f0aba67SShunqian Zheng 	pte_index = rk_iova_pte_index(iova);
7354f0aba67SShunqian Zheng 	pte_addr = &page_table[pte_index];
7364f0aba67SShunqian Zheng 	pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
7374f0aba67SShunqian Zheng 	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
7384f0aba67SShunqian Zheng 				paddr, size, prot);
7394f0aba67SShunqian Zheng 
740c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
741c68a2921SDaniel Kurtz 
742c68a2921SDaniel Kurtz 	return ret;
743c68a2921SDaniel Kurtz }
744c68a2921SDaniel Kurtz 
745c68a2921SDaniel Kurtz static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
746c68a2921SDaniel Kurtz 			     size_t size)
747c68a2921SDaniel Kurtz {
748bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
749c68a2921SDaniel Kurtz 	unsigned long flags;
7504f0aba67SShunqian Zheng 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
751c68a2921SDaniel Kurtz 	phys_addr_t pt_phys;
752c68a2921SDaniel Kurtz 	u32 dte;
753c68a2921SDaniel Kurtz 	u32 *pte_addr;
754c68a2921SDaniel Kurtz 	size_t unmap_size;
755c68a2921SDaniel Kurtz 
756c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
757c68a2921SDaniel Kurtz 
758c68a2921SDaniel Kurtz 	/*
759c68a2921SDaniel Kurtz 	 * pgsize_bitmap specifies iova sizes that fit in one page table
760c68a2921SDaniel Kurtz 	 * (1024 4-KiB pages = 4 MiB).
761c68a2921SDaniel Kurtz 	 * So, size always satisfies 4096 <= size <= 4194304.
762c68a2921SDaniel Kurtz 	 * Since iommu_unmap() guarantees that both iova and size will be
763c68a2921SDaniel Kurtz 	 * aligned, we will always only be unmapping from a single dte here.
764c68a2921SDaniel Kurtz 	 */
765c68a2921SDaniel Kurtz 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
766c68a2921SDaniel Kurtz 	/* Just return 0 if iova is unmapped */
767c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte)) {
768c68a2921SDaniel Kurtz 		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
769c68a2921SDaniel Kurtz 		return 0;
770c68a2921SDaniel Kurtz 	}
771c68a2921SDaniel Kurtz 
772c68a2921SDaniel Kurtz 	pt_phys = rk_dte_pt_address(dte);
773c68a2921SDaniel Kurtz 	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
7744f0aba67SShunqian Zheng 	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
7754f0aba67SShunqian Zheng 	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
776c68a2921SDaniel Kurtz 
777c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
778c68a2921SDaniel Kurtz 
779c68a2921SDaniel Kurtz 	/* Shootdown iotlb entries for iova range that was just unmapped */
780c68a2921SDaniel Kurtz 	rk_iommu_zap_iova(rk_domain, iova, unmap_size);
781c68a2921SDaniel Kurtz 
782c68a2921SDaniel Kurtz 	return unmap_size;
783c68a2921SDaniel Kurtz }
784c68a2921SDaniel Kurtz 
785c68a2921SDaniel Kurtz static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
786c68a2921SDaniel Kurtz {
787c68a2921SDaniel Kurtz 	struct iommu_group *group;
788c68a2921SDaniel Kurtz 	struct device *iommu_dev;
789c68a2921SDaniel Kurtz 	struct rk_iommu *rk_iommu;
790c68a2921SDaniel Kurtz 
791c68a2921SDaniel Kurtz 	group = iommu_group_get(dev);
792c68a2921SDaniel Kurtz 	if (!group)
793c68a2921SDaniel Kurtz 		return NULL;
794c68a2921SDaniel Kurtz 	iommu_dev = iommu_group_get_iommudata(group);
795c68a2921SDaniel Kurtz 	rk_iommu = dev_get_drvdata(iommu_dev);
796c68a2921SDaniel Kurtz 	iommu_group_put(group);
797c68a2921SDaniel Kurtz 
798c68a2921SDaniel Kurtz 	return rk_iommu;
799c68a2921SDaniel Kurtz }
800c68a2921SDaniel Kurtz 
801c68a2921SDaniel Kurtz static int rk_iommu_attach_device(struct iommu_domain *domain,
802c68a2921SDaniel Kurtz 				  struct device *dev)
803c68a2921SDaniel Kurtz {
804c68a2921SDaniel Kurtz 	struct rk_iommu *iommu;
805bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
806c68a2921SDaniel Kurtz 	unsigned long flags;
807cd6438c5SZhengShunQian 	int ret, i;
808c68a2921SDaniel Kurtz 
809c68a2921SDaniel Kurtz 	/*
810c68a2921SDaniel Kurtz 	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
811c68a2921SDaniel Kurtz 	 * Such a device does not belong to an iommu group.
812c68a2921SDaniel Kurtz 	 */
813c68a2921SDaniel Kurtz 	iommu = rk_iommu_from_dev(dev);
814c68a2921SDaniel Kurtz 	if (!iommu)
815c68a2921SDaniel Kurtz 		return 0;
816c68a2921SDaniel Kurtz 
817c68a2921SDaniel Kurtz 	ret = rk_iommu_enable_stall(iommu);
818c68a2921SDaniel Kurtz 	if (ret)
819c68a2921SDaniel Kurtz 		return ret;
820c68a2921SDaniel Kurtz 
821c68a2921SDaniel Kurtz 	ret = rk_iommu_force_reset(iommu);
822c68a2921SDaniel Kurtz 	if (ret)
823c68a2921SDaniel Kurtz 		return ret;
824c68a2921SDaniel Kurtz 
825c68a2921SDaniel Kurtz 	iommu->domain = domain;
826c68a2921SDaniel Kurtz 
827fec3b217SSimon Xue 	ret = devm_request_irq(iommu->dev, iommu->irq, rk_iommu_irq,
828c68a2921SDaniel Kurtz 			       IRQF_SHARED, dev_name(dev), iommu);
829c68a2921SDaniel Kurtz 	if (ret)
830c68a2921SDaniel Kurtz 		return ret;
831c68a2921SDaniel Kurtz 
832cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
8334f0aba67SShunqian Zheng 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
8344f0aba67SShunqian Zheng 			       rk_domain->dt_dma);
835ae8a7910SJohn Keeping 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
836cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
837cd6438c5SZhengShunQian 	}
838c68a2921SDaniel Kurtz 
839c68a2921SDaniel Kurtz 	ret = rk_iommu_enable_paging(iommu);
840c68a2921SDaniel Kurtz 	if (ret)
841c68a2921SDaniel Kurtz 		return ret;
842c68a2921SDaniel Kurtz 
843c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
844c68a2921SDaniel Kurtz 	list_add_tail(&iommu->node, &rk_domain->iommus);
845c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
846c68a2921SDaniel Kurtz 
847ec4292deSHeiko Stuebner 	dev_dbg(dev, "Attached to iommu domain\n");
848c68a2921SDaniel Kurtz 
849c68a2921SDaniel Kurtz 	rk_iommu_disable_stall(iommu);
850c68a2921SDaniel Kurtz 
851c68a2921SDaniel Kurtz 	return 0;
852c68a2921SDaniel Kurtz }
853c68a2921SDaniel Kurtz 
854c68a2921SDaniel Kurtz static void rk_iommu_detach_device(struct iommu_domain *domain,
855c68a2921SDaniel Kurtz 				   struct device *dev)
856c68a2921SDaniel Kurtz {
857c68a2921SDaniel Kurtz 	struct rk_iommu *iommu;
858bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
859c68a2921SDaniel Kurtz 	unsigned long flags;
860cd6438c5SZhengShunQian 	int i;
861c68a2921SDaniel Kurtz 
862c68a2921SDaniel Kurtz 	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
863c68a2921SDaniel Kurtz 	iommu = rk_iommu_from_dev(dev);
864c68a2921SDaniel Kurtz 	if (!iommu)
865c68a2921SDaniel Kurtz 		return;
866c68a2921SDaniel Kurtz 
867c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
868c68a2921SDaniel Kurtz 	list_del_init(&iommu->node);
869c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
870c68a2921SDaniel Kurtz 
871c68a2921SDaniel Kurtz 	/* Ignore error while disabling, just keep going */
872c68a2921SDaniel Kurtz 	rk_iommu_enable_stall(iommu);
873c68a2921SDaniel Kurtz 	rk_iommu_disable_paging(iommu);
874cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
875cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
876cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
877cd6438c5SZhengShunQian 	}
878c68a2921SDaniel Kurtz 	rk_iommu_disable_stall(iommu);
879c68a2921SDaniel Kurtz 
880fec3b217SSimon Xue 	devm_free_irq(iommu->dev, iommu->irq, iommu);
881c68a2921SDaniel Kurtz 
882c68a2921SDaniel Kurtz 	iommu->domain = NULL;
883c68a2921SDaniel Kurtz 
884ec4292deSHeiko Stuebner 	dev_dbg(dev, "Detached from iommu domain\n");
885c68a2921SDaniel Kurtz }
886c68a2921SDaniel Kurtz 
887bcd516a3SJoerg Roedel static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
888c68a2921SDaniel Kurtz {
889c68a2921SDaniel Kurtz 	struct rk_iommu_domain *rk_domain;
8904f0aba67SShunqian Zheng 	struct platform_device *pdev;
8914f0aba67SShunqian Zheng 	struct device *iommu_dev;
892c68a2921SDaniel Kurtz 
893a93db2f2SShunqian Zheng 	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
894bcd516a3SJoerg Roedel 		return NULL;
895bcd516a3SJoerg Roedel 
8964f0aba67SShunqian Zheng 	/* Register a pdev per domain, so the DMA API has a struct device
8974f0aba67SShunqian Zheng 	 * to work with even when a virtual master has no iommu slave.
8984f0aba67SShunqian Zheng 	 */
8994f0aba67SShunqian Zheng 	pdev = platform_device_register_simple("rk_iommu_domain",
9004f0aba67SShunqian Zheng 					       PLATFORM_DEVID_AUTO, NULL, 0);
9014f0aba67SShunqian Zheng 	if (IS_ERR(pdev))
902bcd516a3SJoerg Roedel 		return NULL;
903c68a2921SDaniel Kurtz 
9044f0aba67SShunqian Zheng 	rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
9054f0aba67SShunqian Zheng 	if (!rk_domain)
9064f0aba67SShunqian Zheng 		goto err_unreg_pdev;
9074f0aba67SShunqian Zheng 
9084f0aba67SShunqian Zheng 	rk_domain->pdev = pdev;
9094f0aba67SShunqian Zheng 
910a93db2f2SShunqian Zheng 	if (type == IOMMU_DOMAIN_DMA &&
911a93db2f2SShunqian Zheng 	    iommu_get_dma_cookie(&rk_domain->domain))
9124f0aba67SShunqian Zheng 		goto err_unreg_pdev;
9134f0aba67SShunqian Zheng 
914c68a2921SDaniel Kurtz 	/*
915c68a2921SDaniel Kurtz 	 * rk32xx iommus use a 2-level page table.
916c68a2921SDaniel Kurtz 	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
917c68a2921SDaniel Kurtz 	 * Allocate one 4 KiB page for each table.
918c68a2921SDaniel Kurtz 	 */
919c68a2921SDaniel Kurtz 	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
920c68a2921SDaniel Kurtz 	if (!rk_domain->dt)
9214f0aba67SShunqian Zheng 		goto err_put_cookie;
922c68a2921SDaniel Kurtz 
9234f0aba67SShunqian Zheng 	iommu_dev = &pdev->dev;
9244f0aba67SShunqian Zheng 	rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
9254f0aba67SShunqian Zheng 					   SPAGE_SIZE, DMA_TO_DEVICE);
9264f0aba67SShunqian Zheng 	if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
9274f0aba67SShunqian Zheng 		dev_err(iommu_dev, "DMA map error for DT\n");
9284f0aba67SShunqian Zheng 		goto err_free_dt;
9294f0aba67SShunqian Zheng 	}
9304f0aba67SShunqian Zheng 
9314f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
932c68a2921SDaniel Kurtz 
933c68a2921SDaniel Kurtz 	spin_lock_init(&rk_domain->iommus_lock);
934c68a2921SDaniel Kurtz 	spin_lock_init(&rk_domain->dt_lock);
935c68a2921SDaniel Kurtz 	INIT_LIST_HEAD(&rk_domain->iommus);
936c68a2921SDaniel Kurtz 
937a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.aperture_start = 0;
938a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
939a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.force_aperture = true;
940a93db2f2SShunqian Zheng 
941bcd516a3SJoerg Roedel 	return &rk_domain->domain;
942c68a2921SDaniel Kurtz 
9434f0aba67SShunqian Zheng err_free_dt:
9444f0aba67SShunqian Zheng 	free_page((unsigned long)rk_domain->dt);
9454f0aba67SShunqian Zheng err_put_cookie:
946a93db2f2SShunqian Zheng 	if (type == IOMMU_DOMAIN_DMA)
9474f0aba67SShunqian Zheng 		iommu_put_dma_cookie(&rk_domain->domain);
9484f0aba67SShunqian Zheng err_unreg_pdev:
9494f0aba67SShunqian Zheng 	platform_device_unregister(pdev);
9504f0aba67SShunqian Zheng 
951bcd516a3SJoerg Roedel 	return NULL;
952c68a2921SDaniel Kurtz }
953c68a2921SDaniel Kurtz 
954bcd516a3SJoerg Roedel static void rk_iommu_domain_free(struct iommu_domain *domain)
955c68a2921SDaniel Kurtz {
956bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
957c68a2921SDaniel Kurtz 	int i;
958c68a2921SDaniel Kurtz 
959c68a2921SDaniel Kurtz 	WARN_ON(!list_empty(&rk_domain->iommus));
960c68a2921SDaniel Kurtz 
961c68a2921SDaniel Kurtz 	for (i = 0; i < NUM_DT_ENTRIES; i++) {
962c68a2921SDaniel Kurtz 		u32 dte = rk_domain->dt[i];
963c68a2921SDaniel Kurtz 		if (rk_dte_is_pt_valid(dte)) {
964c68a2921SDaniel Kurtz 			phys_addr_t pt_phys = rk_dte_pt_address(dte);
965c68a2921SDaniel Kurtz 			u32 *page_table = phys_to_virt(pt_phys);
9664f0aba67SShunqian Zheng 			dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
9674f0aba67SShunqian Zheng 					 SPAGE_SIZE, DMA_TO_DEVICE);
968c68a2921SDaniel Kurtz 			free_page((unsigned long)page_table);
969c68a2921SDaniel Kurtz 		}
970c68a2921SDaniel Kurtz 	}
971c68a2921SDaniel Kurtz 
9724f0aba67SShunqian Zheng 	dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
9734f0aba67SShunqian Zheng 			 SPAGE_SIZE, DMA_TO_DEVICE);
974c68a2921SDaniel Kurtz 	free_page((unsigned long)rk_domain->dt);
9754f0aba67SShunqian Zheng 
976a93db2f2SShunqian Zheng 	if (domain->type == IOMMU_DOMAIN_DMA)
9774f0aba67SShunqian Zheng 		iommu_put_dma_cookie(&rk_domain->domain);
9784f0aba67SShunqian Zheng 
9794f0aba67SShunqian Zheng 	platform_device_unregister(rk_domain->pdev);
980c68a2921SDaniel Kurtz }
981c68a2921SDaniel Kurtz 
982c68a2921SDaniel Kurtz static bool rk_iommu_is_dev_iommu_master(struct device *dev)
983c68a2921SDaniel Kurtz {
984c68a2921SDaniel Kurtz 	struct device_node *np = dev->of_node;
985c68a2921SDaniel Kurtz 	int ret;
986c68a2921SDaniel Kurtz 
987c68a2921SDaniel Kurtz 	/*
988c68a2921SDaniel Kurtz 	 * An iommu master has an iommus property containing a list of phandles
989c68a2921SDaniel Kurtz 	 * to iommu nodes, each with an #iommu-cells property with value 0.
990c68a2921SDaniel Kurtz 	 */
991c68a2921SDaniel Kurtz 	ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
992c68a2921SDaniel Kurtz 	return (ret > 0);
993c68a2921SDaniel Kurtz }
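
/*
 * Example device-tree fragment satisfying the check above (node names
 * and the unit address are illustrative, not taken from a real board
 * file):
 *
 *	vopb_mmu: iommu@ff930300 {
 *		compatible = "rockchip,iommu";
 *		...
 *		#iommu-cells = <0>;
 *	};
 *
 *	&vopb {
 *		iommus = <&vopb_mmu>;
 *	};
 */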
994c68a2921SDaniel Kurtz 
995c68a2921SDaniel Kurtz static int rk_iommu_group_set_iommudata(struct iommu_group *group,
996c68a2921SDaniel Kurtz 					struct device *dev)
997c68a2921SDaniel Kurtz {
998c68a2921SDaniel Kurtz 	struct device_node *np = dev->of_node;
999c68a2921SDaniel Kurtz 	struct platform_device *pd;
1000c68a2921SDaniel Kurtz 	int ret;
1001c68a2921SDaniel Kurtz 	struct of_phandle_args args;
1002c68a2921SDaniel Kurtz 
1003c68a2921SDaniel Kurtz 	/*
1004c68a2921SDaniel Kurtz 	 * An iommu master has an iommus property containing a list of phandles
1005c68a2921SDaniel Kurtz 	 * to iommu nodes, each with an #iommu-cells property with value 0.
1006c68a2921SDaniel Kurtz 	 */
1007c68a2921SDaniel Kurtz 	ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
1008c68a2921SDaniel Kurtz 					 &args);
1009c68a2921SDaniel Kurtz 	if (ret) {
1010c68a2921SDaniel Kurtz 		dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
1011c68a2921SDaniel Kurtz 			np->full_name, ret);
1012c68a2921SDaniel Kurtz 		return ret;
1013c68a2921SDaniel Kurtz 	}
1014c68a2921SDaniel Kurtz 	if (args.args_count != 0) {
1015c68a2921SDaniel Kurtz 		dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
1016c68a2921SDaniel Kurtz 			args.np->full_name, args.args_count);
1017c68a2921SDaniel Kurtz 		return -EINVAL;
1018c68a2921SDaniel Kurtz 	}
1019c68a2921SDaniel Kurtz 
1020c68a2921SDaniel Kurtz 	pd = of_find_device_by_node(args.np);
1021c68a2921SDaniel Kurtz 	of_node_put(args.np);
1022c68a2921SDaniel Kurtz 	if (!pd) {
1023c68a2921SDaniel Kurtz 		dev_err(dev, "iommu %s not found\n", args.np->full_name);
1024c68a2921SDaniel Kurtz 		return -EPROBE_DEFER;
1025c68a2921SDaniel Kurtz 	}
1026c68a2921SDaniel Kurtz 
1027c68a2921SDaniel Kurtz 	/* TODO(djkurtz): handle multiple slave iommus for a single master */
1028c68a2921SDaniel Kurtz 	iommu_group_set_iommudata(group, &pd->dev, NULL);
1029c68a2921SDaniel Kurtz 
1030c68a2921SDaniel Kurtz 	return 0;
1031c68a2921SDaniel Kurtz }
1032c68a2921SDaniel Kurtz 
static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return -ENODEV;

	group = iommu_group_get(dev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto err_put_group;

	ret = rk_iommu_group_set_iommudata(group, dev);
	if (ret)
		goto err_remove_device;

	iommu_group_put(group);

	return 0;

err_remove_device:
	iommu_group_remove_device(dev);
err_put_group:
	iommu_group_put(group);
	return ret;
}

static void rk_iommu_remove_device(struct device *dev)
{
	if (!rk_iommu_is_dev_iommu_master(dev))
		return;

	iommu_group_remove_device(dev);
}

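/* Callbacks exported to the IOMMU core for devices on the platform bus. */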
static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};

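/*
 * "rk_iommu_domain" is a dummy platform device: its probe only gives the
 * device usable (non-dummy) DMA ops and a 32-bit DMA mask, so that the
 * driver can go through the DMA API when maintaining its page tables.
 */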
static int rk_iommu_domain_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
	if (!dev->dma_parms)
		return -ENOMEM;

	/* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
	arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));

	return 0;
}

static struct platform_driver rk_iommu_domain_driver = {
	.probe = rk_iommu_domain_probe,
	.driver = {
		   .name = "rk_iommu_domain",
	},
};

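/*
 * Probe one IOMMU instance. A single instance may expose several MMUs that
 * share one IRQ and one set of page tables, so every MEM resource is mapped
 * and the number of MMUs actually found is recorded in iommu->num_mmu.
 */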
static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int i;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0) {
		/*
		 * bases[0] is NULL (not an ERR_PTR) when no MEM resource was
		 * found at all, in which case PTR_ERR() alone would return 0
		 * (success); fall back to -ENXIO in that case.
		 */
		return iommu->bases[0] ? PTR_ERR(iommu->bases[0]) : -ENXIO;
	}

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
		return -ENXIO;
	}

	return 0;
}

static int rk_iommu_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.remove = rk_iommu_remove,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
	},
};

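/*
 * Only take over the platform bus if the device tree actually describes a
 * Rockchip IOMMU; bail out quietly otherwise, so that another vendor's IOMMU
 * driver is free to register its own ops for the bus.
 */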
static int __init rk_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, rk_iommu_dt_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
	if (ret)
		return ret;

	ret = platform_driver_register(&rk_iommu_domain_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&rk_iommu_driver);
	if (ret)
		platform_driver_unregister(&rk_iommu_domain_driver);
	return ret;
}

static void __exit rk_iommu_exit(void)
{
	platform_driver_unregister(&rk_iommu_driver);
	platform_driver_unregister(&rk_iommu_domain_driver);
}

subsys_initcall(rk_iommu_init);
module_exit(rk_iommu_exit);

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");