xref: /linux/drivers/iommu/rockchip-iommu.c (revision 0cc6f45cecb46cefe89c17ec816dc8cd58a2229a)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2c68a2921SDaniel Kurtz /*
3669a047bSPaul Gortmaker  * IOMMU API for Rockchip
4669a047bSPaul Gortmaker  *
5669a047bSPaul Gortmaker  * Module Authors:	Simon Xue <xxm@rock-chips.com>
6669a047bSPaul Gortmaker  *			Daniel Kurtz <djkurtz@chromium.org>
7c68a2921SDaniel Kurtz  */
8c68a2921SDaniel Kurtz 
9f2e3a5f5STomasz Figa #include <linux/clk.h>
10c68a2921SDaniel Kurtz #include <linux/compiler.h>
11c68a2921SDaniel Kurtz #include <linux/delay.h>
12c68a2921SDaniel Kurtz #include <linux/device.h>
13461a6946SJoerg Roedel #include <linux/dma-mapping.h>
14c68a2921SDaniel Kurtz #include <linux/errno.h>
15c68a2921SDaniel Kurtz #include <linux/interrupt.h>
16c68a2921SDaniel Kurtz #include <linux/io.h>
17c68a2921SDaniel Kurtz #include <linux/iommu.h>
180416bf64STomasz Figa #include <linux/iopoll.h>
19c68a2921SDaniel Kurtz #include <linux/list.h>
20c68a2921SDaniel Kurtz #include <linux/mm.h>
21669a047bSPaul Gortmaker #include <linux/init.h>
22c68a2921SDaniel Kurtz #include <linux/of.h>
23c68a2921SDaniel Kurtz #include <linux/of_platform.h>
24c68a2921SDaniel Kurtz #include <linux/platform_device.h>
250f181d3cSJeffy Chen #include <linux/pm_runtime.h>
26c68a2921SDaniel Kurtz #include <linux/slab.h>
27c68a2921SDaniel Kurtz #include <linux/spinlock.h>
28c68a2921SDaniel Kurtz 
29*5404ccaaSPasha Tatashin #include "iommu-pages.h"
30*5404ccaaSPasha Tatashin 
31c68a2921SDaniel Kurtz /** MMU register offsets */
32c68a2921SDaniel Kurtz #define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
33c68a2921SDaniel Kurtz #define RK_MMU_STATUS		0x04
34c68a2921SDaniel Kurtz #define RK_MMU_COMMAND		0x08
35c68a2921SDaniel Kurtz #define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
36c68a2921SDaniel Kurtz #define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
37c68a2921SDaniel Kurtz #define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
38c68a2921SDaniel Kurtz #define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
39c68a2921SDaniel Kurtz #define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
40c68a2921SDaniel Kurtz #define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
41c68a2921SDaniel Kurtz #define RK_MMU_AUTO_GATING	0x24
42c68a2921SDaniel Kurtz 
43c68a2921SDaniel Kurtz #define DTE_ADDR_DUMMY		0xCAFEBABE
440416bf64STomasz Figa 
450416bf64STomasz Figa #define RK_MMU_POLL_PERIOD_US		100
460416bf64STomasz Figa #define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
470416bf64STomasz Figa #define RK_MMU_POLL_TIMEOUT_US		1000
48c68a2921SDaniel Kurtz 
49c68a2921SDaniel Kurtz /* RK_MMU_STATUS fields */
50c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
51c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
52c68a2921SDaniel Kurtz #define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
53c68a2921SDaniel Kurtz #define RK_MMU_STATUS_IDLE                 BIT(3)
54c68a2921SDaniel Kurtz #define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
55c68a2921SDaniel Kurtz #define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
56c68a2921SDaniel Kurtz #define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)
57c68a2921SDaniel Kurtz 
58c68a2921SDaniel Kurtz /* RK_MMU_COMMAND command values */
59c68a2921SDaniel Kurtz #define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
60c68a2921SDaniel Kurtz #define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
61c68a2921SDaniel Kurtz #define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
61c68a2921SDaniel Kurtz #define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stall; re-enables paging */
63c68a2921SDaniel Kurtz #define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
64c68a2921SDaniel Kurtz #define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
65c68a2921SDaniel Kurtz #define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */
66c68a2921SDaniel Kurtz 
67c68a2921SDaniel Kurtz /* RK_MMU_INT_* register fields */
68c68a2921SDaniel Kurtz #define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
69c68a2921SDaniel Kurtz #define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
70c68a2921SDaniel Kurtz #define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
71c68a2921SDaniel Kurtz 
72c68a2921SDaniel Kurtz #define NUM_DT_ENTRIES 1024
73c68a2921SDaniel Kurtz #define NUM_PT_ENTRIES 1024
74c68a2921SDaniel Kurtz 
75c68a2921SDaniel Kurtz #define SPAGE_ORDER 12
76c68a2921SDaniel Kurtz #define SPAGE_SIZE (1 << SPAGE_ORDER)
77c68a2921SDaniel Kurtz 
78c68a2921SDaniel Kurtz  /*
79c68a2921SDaniel Kurtz   * Support mapping any size that fits in one page table:
80c68a2921SDaniel Kurtz   *   4 KiB to 4 MiB
81c68a2921SDaniel Kurtz   */
82c68a2921SDaniel Kurtz #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
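/*
 * Worked example of the bitmap above: bits 12..22 are set, so the
 * supported power-of-two mapping sizes are BIT(12) = 4 KiB,
 * BIT(13) = 8 KiB, ... up to BIT(22) = 4 MiB, i.e. everything from a
 * single PTE up to a fully populated page table.
 */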
83c68a2921SDaniel Kurtz 
84c68a2921SDaniel Kurtz struct rk_iommu_domain {
85c68a2921SDaniel Kurtz 	struct list_head iommus;
86c68a2921SDaniel Kurtz 	u32 *dt; /* page directory table */
874f0aba67SShunqian Zheng 	dma_addr_t dt_dma;
88c68a2921SDaniel Kurtz 	spinlock_t iommus_lock; /* lock for iommus list */
89c68a2921SDaniel Kurtz 	spinlock_t dt_lock; /* lock for modifying page directory table */
90bcd516a3SJoerg Roedel 
91bcd516a3SJoerg Roedel 	struct iommu_domain domain;
92c68a2921SDaniel Kurtz };
93c68a2921SDaniel Kurtz 
94f2e3a5f5STomasz Figa /* list of clocks required by IOMMU */
95f2e3a5f5STomasz Figa static const char * const rk_iommu_clocks[] = {
96f2e3a5f5STomasz Figa 	"aclk", "iface",
97f2e3a5f5STomasz Figa };
98f2e3a5f5STomasz Figa 
99227014b3SBenjamin Gaignard struct rk_iommu_ops {
100227014b3SBenjamin Gaignard 	phys_addr_t (*pt_address)(u32 dte);
101227014b3SBenjamin Gaignard 	u32 (*mk_dtentries)(dma_addr_t pt_dma);
102227014b3SBenjamin Gaignard 	u32 (*mk_ptentries)(phys_addr_t page, int prot);
103227014b3SBenjamin Gaignard 	u64 dma_bit_mask;
1042a7e6400SJonas Karlman 	gfp_t gfp_flags;
105227014b3SBenjamin Gaignard };
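/*
 * Minimal sketch of how a per-generation instance of the ops above could
 * be wired to the v1 helpers defined later in this file (the variable
 * name and exact field values here are illustrative, not taken from this
 * hunk):
 *
 *	static const struct rk_iommu_ops rk_iommu_v1_ops_example = {
 *		.pt_address   = &rk_dte_pt_address,
 *		.mk_dtentries = &rk_mk_dte,
 *		.mk_ptentries = &rk_mk_pte,
 *		.dma_bit_mask = DMA_BIT_MASK(32),
 *		.gfp_flags    = GFP_DMA32,
 *	};
 *
 * rk_ops (declared below) would then point at one such instance, so the
 * shared map/unmap paths only ever call rk_ops->pt_address(),
 * ->mk_dtentries() and ->mk_ptentries() regardless of page-table format.
 */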
106227014b3SBenjamin Gaignard 
107c68a2921SDaniel Kurtz struct rk_iommu {
108c68a2921SDaniel Kurtz 	struct device *dev;
109cd6438c5SZhengShunQian 	void __iomem **bases;
110cd6438c5SZhengShunQian 	int num_mmu;
111f9258156SHeiko Stuebner 	int num_irq;
112f2e3a5f5STomasz Figa 	struct clk_bulk_data *clocks;
113f2e3a5f5STomasz Figa 	int num_clocks;
114c3aa4742SSimon Xue 	bool reset_disabled;
115c9d9f239SJoerg Roedel 	struct iommu_device iommu;
116c68a2921SDaniel Kurtz 	struct list_head node; /* entry in rk_iommu_domain.iommus */
117c68a2921SDaniel Kurtz 	struct iommu_domain *domain; /* domain to which iommu is attached */
118c68a2921SDaniel Kurtz };
119c68a2921SDaniel Kurtz 
1205fd577c3SJeffy Chen struct rk_iommudata {
1210f181d3cSJeffy Chen 	struct device_link *link; /* runtime PM link from IOMMU to master */
1225fd577c3SJeffy Chen 	struct rk_iommu *iommu;
1235fd577c3SJeffy Chen };
1245fd577c3SJeffy Chen 
1259176a303SJeffy Chen static struct device *dma_dev;
126227014b3SBenjamin Gaignard static const struct rk_iommu_ops *rk_ops;
12725c23255SSteven Price static struct iommu_domain rk_identity_domain;
1289176a303SJeffy Chen 
1294f0aba67SShunqian Zheng static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
1304f0aba67SShunqian Zheng 				  unsigned int count)
131c68a2921SDaniel Kurtz {
1324f0aba67SShunqian Zheng 	size_t size = count * sizeof(u32); /* count is the number of u32 entries */
133c68a2921SDaniel Kurtz 
1349176a303SJeffy Chen 	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
135c68a2921SDaniel Kurtz }
136c68a2921SDaniel Kurtz 
137bcd516a3SJoerg Roedel static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
138bcd516a3SJoerg Roedel {
139bcd516a3SJoerg Roedel 	return container_of(dom, struct rk_iommu_domain, domain);
140bcd516a3SJoerg Roedel }
141bcd516a3SJoerg Roedel 
142c68a2921SDaniel Kurtz /*
143c68a2921SDaniel Kurtz  * The Rockchip rk3288 iommu uses a 2-level page table.
144c68a2921SDaniel Kurtz  * The first level is the "Directory Table" (DT).
145c68a2921SDaniel Kurtz  * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
146c68a2921SDaniel Kurtz  * to a "Page Table".
147c68a2921SDaniel Kurtz  * The second level consists of up to 1024 Page Tables (PTs).
148c68a2921SDaniel Kurtz  * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
149c68a2921SDaniel Kurtz  * a 4 KB page of physical memory.
150c68a2921SDaniel Kurtz  *
151c68a2921SDaniel Kurtz  * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
152c68a2921SDaniel Kurtz  * Each iommu device has a MMU_DTE_ADDR register that contains the physical
153c68a2921SDaniel Kurtz  * address of the start of the DT page.
154c68a2921SDaniel Kurtz  *
155c68a2921SDaniel Kurtz  * The structure of the page table is as follows:
156c68a2921SDaniel Kurtz  *
157c68a2921SDaniel Kurtz  *                   DT
158c68a2921SDaniel Kurtz  * MMU_DTE_ADDR -> +-----+
159c68a2921SDaniel Kurtz  *                 |     |
160c68a2921SDaniel Kurtz  *                 +-----+     PT
161c68a2921SDaniel Kurtz  *                 | DTE | -> +-----+
162c68a2921SDaniel Kurtz  *                 +-----+    |     |     Memory
163c68a2921SDaniel Kurtz  *                 |     |    +-----+     Page
164c68a2921SDaniel Kurtz  *                 |     |    | PTE | -> +-----+
165c68a2921SDaniel Kurtz  *                 +-----+    +-----+    |     |
166c68a2921SDaniel Kurtz  *                            |     |    |     |
167c68a2921SDaniel Kurtz  *                            |     |    |     |
168c68a2921SDaniel Kurtz  *                            +-----+    |     |
169c68a2921SDaniel Kurtz  *                                       |     |
170c68a2921SDaniel Kurtz  *                                       |     |
171c68a2921SDaniel Kurtz  *                                       +-----+
172c68a2921SDaniel Kurtz  */
173c68a2921SDaniel Kurtz 
174c68a2921SDaniel Kurtz /*
175c68a2921SDaniel Kurtz  * Each DTE has a PT address and a valid bit:
176c68a2921SDaniel Kurtz  * +---------------------+-----------+-+
177c68a2921SDaniel Kurtz  * | PT address          | Reserved  |V|
178c68a2921SDaniel Kurtz  * +---------------------+-----------+-+
179c68a2921SDaniel Kurtz  *  31:12 - PT address (PTs always start on a 4 KB boundary)
180c68a2921SDaniel Kurtz  *  11: 1 - Reserved
181c68a2921SDaniel Kurtz  *      0 - 1 if PT @ PT address is valid
182c68a2921SDaniel Kurtz  */
183c68a2921SDaniel Kurtz #define RK_DTE_PT_ADDRESS_MASK    0xfffff000
184c68a2921SDaniel Kurtz #define RK_DTE_PT_VALID           BIT(0)
185c68a2921SDaniel Kurtz 
186c68a2921SDaniel Kurtz static inline phys_addr_t rk_dte_pt_address(u32 dte)
187c68a2921SDaniel Kurtz {
188c68a2921SDaniel Kurtz 	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
189c68a2921SDaniel Kurtz }
190c68a2921SDaniel Kurtz 
191c55356c5SBenjamin Gaignard /*
192c55356c5SBenjamin Gaignard  * In v2:
193c55356c5SBenjamin Gaignard  * 31:12 - PT address bits 31:12
194c55356c5SBenjamin Gaignard  * 11: 8 - PT address bits 35:32
195c55356c5SBenjamin Gaignard  *  7: 4 - PT address bits 39:36
196c55356c5SBenjamin Gaignard  *  3: 1 - Reserved
197c55356c5SBenjamin Gaignard  *     0 - 1 if PT @ PT address is valid
198c55356c5SBenjamin Gaignard  */
199c55356c5SBenjamin Gaignard #define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
200c55356c5SBenjamin Gaignard #define DTE_HI_MASK1	GENMASK(11, 8)
201c55356c5SBenjamin Gaignard #define DTE_HI_MASK2	GENMASK(7, 4)
202c55356c5SBenjamin Gaignard #define DTE_HI_SHIFT1	24 /* shift bit 8 to bit 32 */
203c55356c5SBenjamin Gaignard #define DTE_HI_SHIFT2	32 /* shift bit 4 to bit 36 */
204f7ff3cffSAlex Bee #define PAGE_DESC_HI_MASK1	GENMASK_ULL(35, 32)
205f7ff3cffSAlex Bee #define PAGE_DESC_HI_MASK2	GENMASK_ULL(39, 36)
206c55356c5SBenjamin Gaignard 
207c55356c5SBenjamin Gaignard static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
208c55356c5SBenjamin Gaignard {
209c55356c5SBenjamin Gaignard 	u64 dte_v2 = dte;
210c55356c5SBenjamin Gaignard 
211c55356c5SBenjamin Gaignard 	dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
212c55356c5SBenjamin Gaignard 		 ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
213c55356c5SBenjamin Gaignard 		 (dte_v2 & RK_DTE_PT_ADDRESS_MASK);
214c55356c5SBenjamin Gaignard 
215c55356c5SBenjamin Gaignard 	return (phys_addr_t)dte_v2;
216c55356c5SBenjamin Gaignard }
217c55356c5SBenjamin Gaignard 
218c68a2921SDaniel Kurtz static inline bool rk_dte_is_pt_valid(u32 dte)
219c68a2921SDaniel Kurtz {
220c68a2921SDaniel Kurtz 	return dte & RK_DTE_PT_VALID;
221c68a2921SDaniel Kurtz }
222c68a2921SDaniel Kurtz 
2234f0aba67SShunqian Zheng static inline u32 rk_mk_dte(dma_addr_t pt_dma)
224c68a2921SDaniel Kurtz {
2254f0aba67SShunqian Zheng 	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
226c68a2921SDaniel Kurtz }
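/*
 * Worked example for the v1 helpers (illustrative address): with a page
 * table at pt_dma = 0x12345000,
 *	rk_mk_dte(0x12345000)          == 0x12345001 (address | valid bit)
 *	rk_dte_pt_address(0x12345001)  == 0x12345000
 *	rk_dte_is_pt_valid(0x12345001) != 0
 */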
227c68a2921SDaniel Kurtz 
228c55356c5SBenjamin Gaignard static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
229c55356c5SBenjamin Gaignard {
230c55356c5SBenjamin Gaignard 	pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
231c55356c5SBenjamin Gaignard 		 ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
232c55356c5SBenjamin Gaignard 		 (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2;
233c55356c5SBenjamin Gaignard 
234c55356c5SBenjamin Gaignard 	return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
235c55356c5SBenjamin Gaignard }
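/*
 * Worked example of the v2 packing (illustrative 36-bit address): with a
 * page table at pt_dma = 0x123456000,
 *	- address bits 31:12 (0x23456) stay in dte bits 31:12,
 *	- address bits 35:32 (0x1) are folded down into dte bits 11:8,
 * so rk_mk_dte_v2(0x123456000) == 0x23456101, and
 * rk_dte_pt_address_v2(0x23456101) recovers 0x123456000.
 */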
236c55356c5SBenjamin Gaignard 
237c68a2921SDaniel Kurtz /*
238c68a2921SDaniel Kurtz  * Each PTE has a Page address, some flags and a valid bit:
239c68a2921SDaniel Kurtz  * +---------------------+---+-------+-+
240c68a2921SDaniel Kurtz  * | Page address        |Rsv| Flags |V|
241c68a2921SDaniel Kurtz  * +---------------------+---+-------+-+
242c68a2921SDaniel Kurtz  *  31:12 - Page address (Pages always start on a 4 KB boundary)
243c68a2921SDaniel Kurtz  *  11: 9 - Reserved
244c68a2921SDaniel Kurtz  *   8: 1 - Flags
245c68a2921SDaniel Kurtz  *      8 - Read allocate - allocate cache space on read misses
246c68a2921SDaniel Kurtz  *      7 - Read cache - enable cache & prefetch of data
247c68a2921SDaniel Kurtz  *      6 - Write buffer - enable delaying writes on their way to memory
248c68a2921SDaniel Kurtz  *      5 - Write allocate - allocate cache space on write misses
249c68a2921SDaniel Kurtz  *      4 - Write cache - different writes can be merged together
250c68a2921SDaniel Kurtz  *      3 - Override cache attributes
251c68a2921SDaniel Kurtz  *          if 1, bits 4-8 control cache attributes
252c68a2921SDaniel Kurtz  *          if 0, the system bus defaults are used
253c68a2921SDaniel Kurtz  *      2 - Writable
254c68a2921SDaniel Kurtz  *      1 - Readable
255c68a2921SDaniel Kurtz  *      0 - 1 if Page @ Page address is valid
256c68a2921SDaniel Kurtz  */
257c68a2921SDaniel Kurtz #define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
258c68a2921SDaniel Kurtz #define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
259c68a2921SDaniel Kurtz #define RK_PTE_PAGE_WRITABLE      BIT(2)
260c68a2921SDaniel Kurtz #define RK_PTE_PAGE_READABLE      BIT(1)
261c68a2921SDaniel Kurtz #define RK_PTE_PAGE_VALID         BIT(0)
262c68a2921SDaniel Kurtz 
263c68a2921SDaniel Kurtz static inline bool rk_pte_is_page_valid(u32 pte)
264c68a2921SDaniel Kurtz {
265c68a2921SDaniel Kurtz 	return pte & RK_PTE_PAGE_VALID;
266c68a2921SDaniel Kurtz }
267c68a2921SDaniel Kurtz 
268c68a2921SDaniel Kurtz /* TODO: set cache flags per prot IOMMU_CACHE */
269c68a2921SDaniel Kurtz static u32 rk_mk_pte(phys_addr_t page, int prot)
270c68a2921SDaniel Kurtz {
271c68a2921SDaniel Kurtz 	u32 flags = 0;
272c68a2921SDaniel Kurtz 	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
273c68a2921SDaniel Kurtz 	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
274c68a2921SDaniel Kurtz 	page &= RK_PTE_PAGE_ADDRESS_MASK;
275c68a2921SDaniel Kurtz 	return page | flags | RK_PTE_PAGE_VALID;
276c68a2921SDaniel Kurtz }
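/*
 * Worked example (illustrative): mapping a page at 0x12345000 with
 * prot = IOMMU_READ | IOMMU_WRITE gives
 *	rk_mk_pte(0x12345000, prot) == 0x12345000 | BIT(1) | BIT(2) | BIT(0)
 *				    == 0x12345007
 * i.e. the page address plus the readable, writable and valid bits.
 */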
277c68a2921SDaniel Kurtz 
278c55356c5SBenjamin Gaignard /*
279c55356c5SBenjamin Gaignard  * In v2:
280c55356c5SBenjamin Gaignard  * 31:12 - Page address bits 31:12
2816df63b7eSJonas Karlman  * 11: 8 - Page address bits 35:32
2826df63b7eSJonas Karlman  *  7: 4 - Page address bits 39:36
283c55356c5SBenjamin Gaignard  *     3 - Security
2847eb99841SMichael Riesch  *     2 - Writable
2857eb99841SMichael Riesch  *     1 - Readable
286c55356c5SBenjamin Gaignard  *     0 - 1 if Page @ Page address is valid
287c55356c5SBenjamin Gaignard  */
288c55356c5SBenjamin Gaignard 
289c55356c5SBenjamin Gaignard static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
290c55356c5SBenjamin Gaignard {
291c55356c5SBenjamin Gaignard 	u32 flags = 0;
292c55356c5SBenjamin Gaignard 
2937eb99841SMichael Riesch 	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
2947eb99841SMichael Riesch 	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
295c55356c5SBenjamin Gaignard 
296c55356c5SBenjamin Gaignard 	return rk_mk_dte_v2(page) | flags;
297c55356c5SBenjamin Gaignard }
298c55356c5SBenjamin Gaignard 
299c68a2921SDaniel Kurtz static u32 rk_mk_pte_invalid(u32 pte)
300c68a2921SDaniel Kurtz {
301c68a2921SDaniel Kurtz 	return pte & ~RK_PTE_PAGE_VALID;
302c68a2921SDaniel Kurtz }
303c68a2921SDaniel Kurtz 
304c68a2921SDaniel Kurtz /*
305c68a2921SDaniel Kurtz  * rk3288 iova (IOMMU Virtual Address) format
306c68a2921SDaniel Kurtz  *  31       22.21       12.11          0
307c68a2921SDaniel Kurtz  * +-----------+-----------+-------------+
308c68a2921SDaniel Kurtz  * | DTE index | PTE index | Page offset |
309c68a2921SDaniel Kurtz  * +-----------+-----------+-------------+
310c68a2921SDaniel Kurtz  *  31:22 - DTE index   - index of DTE in DT
311c68a2921SDaniel Kurtz  *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
312c68a2921SDaniel Kurtz  *  11: 0 - Page offset - offset into page @ PTE.page_address
313c68a2921SDaniel Kurtz  */
314c68a2921SDaniel Kurtz #define RK_IOVA_DTE_MASK    0xffc00000
315c68a2921SDaniel Kurtz #define RK_IOVA_DTE_SHIFT   22
316c68a2921SDaniel Kurtz #define RK_IOVA_PTE_MASK    0x003ff000
317c68a2921SDaniel Kurtz #define RK_IOVA_PTE_SHIFT   12
318c68a2921SDaniel Kurtz #define RK_IOVA_PAGE_MASK   0x00000fff
319c68a2921SDaniel Kurtz #define RK_IOVA_PAGE_SHIFT  0
320c68a2921SDaniel Kurtz 
321c68a2921SDaniel Kurtz static u32 rk_iova_dte_index(dma_addr_t iova)
322c68a2921SDaniel Kurtz {
323c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
324c68a2921SDaniel Kurtz }
325c68a2921SDaniel Kurtz 
326c68a2921SDaniel Kurtz static u32 rk_iova_pte_index(dma_addr_t iova)
327c68a2921SDaniel Kurtz {
328c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
329c68a2921SDaniel Kurtz }
330c68a2921SDaniel Kurtz 
331c68a2921SDaniel Kurtz static u32 rk_iova_page_offset(dma_addr_t iova)
332c68a2921SDaniel Kurtz {
333c68a2921SDaniel Kurtz 	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
334c68a2921SDaniel Kurtz }
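/*
 * Worked example (illustrative iova): for iova = 0x12345678,
 *	rk_iova_dte_index(iova)   == 0x048 (bits 31:22)
 *	rk_iova_pte_index(iova)   == 0x345 (bits 21:12)
 *	rk_iova_page_offset(iova) == 0x678 (bits 11:0)
 * so a walk reads dt[0x48], then the page table entry at index 0x345,
 * then adds the 0x678 byte offset within the 4 KiB page.
 */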
335c68a2921SDaniel Kurtz 
336cd6438c5SZhengShunQian static u32 rk_iommu_read(void __iomem *base, u32 offset)
337c68a2921SDaniel Kurtz {
338cd6438c5SZhengShunQian 	return readl(base + offset);
339c68a2921SDaniel Kurtz }
340c68a2921SDaniel Kurtz 
341cd6438c5SZhengShunQian static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
342c68a2921SDaniel Kurtz {
343cd6438c5SZhengShunQian 	writel(value, base + offset);
344c68a2921SDaniel Kurtz }
345c68a2921SDaniel Kurtz 
346c68a2921SDaniel Kurtz static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
347c68a2921SDaniel Kurtz {
348cd6438c5SZhengShunQian 	int i;
349cd6438c5SZhengShunQian 
350cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
351cd6438c5SZhengShunQian 		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
352c68a2921SDaniel Kurtz }
353c68a2921SDaniel Kurtz 
354cd6438c5SZhengShunQian static void rk_iommu_base_command(void __iomem *base, u32 command)
355cd6438c5SZhengShunQian {
356cd6438c5SZhengShunQian 	writel(command, base + RK_MMU_COMMAND);
357cd6438c5SZhengShunQian }
358bf2a5e71STomasz Figa static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
359c68a2921SDaniel Kurtz 			       size_t size)
360c68a2921SDaniel Kurtz {
361cd6438c5SZhengShunQian 	int i;
362bf2a5e71STomasz Figa 	dma_addr_t iova_end = iova_start + size;
363c68a2921SDaniel Kurtz 	/*
364c68a2921SDaniel Kurtz 	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
365c68a2921SDaniel Kurtz 	 * entire iotlb rather than iterate over individual iovas.
366c68a2921SDaniel Kurtz 	 */
367bf2a5e71STomasz Figa 	for (i = 0; i < iommu->num_mmu; i++) {
368bf2a5e71STomasz Figa 		dma_addr_t iova;
369bf2a5e71STomasz Figa 
370bf2a5e71STomasz Figa 		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
371cd6438c5SZhengShunQian 			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
372c68a2921SDaniel Kurtz 	}
373bf2a5e71STomasz Figa }
374c68a2921SDaniel Kurtz 
375c68a2921SDaniel Kurtz static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
376c68a2921SDaniel Kurtz {
377cd6438c5SZhengShunQian 	bool active = true;
378cd6438c5SZhengShunQian 	int i;
379cd6438c5SZhengShunQian 
380cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
381fbedd9b9SJohn Keeping 		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
382fbedd9b9SJohn Keeping 					   RK_MMU_STATUS_STALL_ACTIVE);
383cd6438c5SZhengShunQian 
384cd6438c5SZhengShunQian 	return active;
385c68a2921SDaniel Kurtz }
386c68a2921SDaniel Kurtz 
387c68a2921SDaniel Kurtz static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
388c68a2921SDaniel Kurtz {
389cd6438c5SZhengShunQian 	bool enable = true;
390cd6438c5SZhengShunQian 	int i;
391cd6438c5SZhengShunQian 
392cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++)
393fbedd9b9SJohn Keeping 		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
394fbedd9b9SJohn Keeping 					   RK_MMU_STATUS_PAGING_ENABLED);
395cd6438c5SZhengShunQian 
396cd6438c5SZhengShunQian 	return enable;
397c68a2921SDaniel Kurtz }
398c68a2921SDaniel Kurtz 
3990416bf64STomasz Figa static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
4000416bf64STomasz Figa {
4010416bf64STomasz Figa 	bool done = true;
4020416bf64STomasz Figa 	int i;
4030416bf64STomasz Figa 
4040416bf64STomasz Figa 	for (i = 0; i < iommu->num_mmu; i++)
4050416bf64STomasz Figa 		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
4060416bf64STomasz Figa 
4070416bf64STomasz Figa 	return done;
4080416bf64STomasz Figa }
4090416bf64STomasz Figa 
410c68a2921SDaniel Kurtz static int rk_iommu_enable_stall(struct rk_iommu *iommu)
411c68a2921SDaniel Kurtz {
412cd6438c5SZhengShunQian 	int ret, i;
4130416bf64STomasz Figa 	bool val;
414c68a2921SDaniel Kurtz 
415c68a2921SDaniel Kurtz 	if (rk_iommu_is_stall_active(iommu))
416c68a2921SDaniel Kurtz 		return 0;
417c68a2921SDaniel Kurtz 
418c68a2921SDaniel Kurtz 	/* Stall can only be enabled if paging is enabled */
419c68a2921SDaniel Kurtz 	if (!rk_iommu_is_paging_enabled(iommu))
420c68a2921SDaniel Kurtz 		return 0;
421c68a2921SDaniel Kurtz 
422c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
423c68a2921SDaniel Kurtz 
4240416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
4250416bf64STomasz Figa 				 val, RK_MMU_POLL_PERIOD_US,
4260416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
427c68a2921SDaniel Kurtz 	if (ret)
428cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
429c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
430cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
431c68a2921SDaniel Kurtz 
432c68a2921SDaniel Kurtz 	return ret;
433c68a2921SDaniel Kurtz }
434c68a2921SDaniel Kurtz 
435c68a2921SDaniel Kurtz static int rk_iommu_disable_stall(struct rk_iommu *iommu)
436c68a2921SDaniel Kurtz {
437cd6438c5SZhengShunQian 	int ret, i;
4380416bf64STomasz Figa 	bool val;
439c68a2921SDaniel Kurtz 
440c68a2921SDaniel Kurtz 	if (!rk_iommu_is_stall_active(iommu))
441c68a2921SDaniel Kurtz 		return 0;
442c68a2921SDaniel Kurtz 
443c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
444c68a2921SDaniel Kurtz 
4450416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
4460416bf64STomasz Figa 				 !val, RK_MMU_POLL_PERIOD_US,
4470416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
448c68a2921SDaniel Kurtz 	if (ret)
449cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
450c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
451cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
452c68a2921SDaniel Kurtz 
453c68a2921SDaniel Kurtz 	return ret;
454c68a2921SDaniel Kurtz }
455c68a2921SDaniel Kurtz 
456c68a2921SDaniel Kurtz static int rk_iommu_enable_paging(struct rk_iommu *iommu)
457c68a2921SDaniel Kurtz {
458cd6438c5SZhengShunQian 	int ret, i;
4590416bf64STomasz Figa 	bool val;
460c68a2921SDaniel Kurtz 
461c68a2921SDaniel Kurtz 	if (rk_iommu_is_paging_enabled(iommu))
462c68a2921SDaniel Kurtz 		return 0;
463c68a2921SDaniel Kurtz 
464c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
465c68a2921SDaniel Kurtz 
4660416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
4670416bf64STomasz Figa 				 val, RK_MMU_POLL_PERIOD_US,
4680416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
469c68a2921SDaniel Kurtz 	if (ret)
470cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
471c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
472cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
473c68a2921SDaniel Kurtz 
474c68a2921SDaniel Kurtz 	return ret;
475c68a2921SDaniel Kurtz }
476c68a2921SDaniel Kurtz 
477c68a2921SDaniel Kurtz static int rk_iommu_disable_paging(struct rk_iommu *iommu)
478c68a2921SDaniel Kurtz {
479cd6438c5SZhengShunQian 	int ret, i;
4800416bf64STomasz Figa 	bool val;
481c68a2921SDaniel Kurtz 
482c68a2921SDaniel Kurtz 	if (!rk_iommu_is_paging_enabled(iommu))
483c68a2921SDaniel Kurtz 		return 0;
484c68a2921SDaniel Kurtz 
485c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
486c68a2921SDaniel Kurtz 
4870416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
4880416bf64STomasz Figa 				 !val, RK_MMU_POLL_PERIOD_US,
4890416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
490c68a2921SDaniel Kurtz 	if (ret)
491cd6438c5SZhengShunQian 		for (i = 0; i < iommu->num_mmu; i++)
492c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
493cd6438c5SZhengShunQian 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
494c68a2921SDaniel Kurtz 
495c68a2921SDaniel Kurtz 	return ret;
496c68a2921SDaniel Kurtz }
497c68a2921SDaniel Kurtz 
498c68a2921SDaniel Kurtz static int rk_iommu_force_reset(struct rk_iommu *iommu)
499c68a2921SDaniel Kurtz {
500cd6438c5SZhengShunQian 	int ret, i;
501c68a2921SDaniel Kurtz 	u32 dte_addr;
5020416bf64STomasz Figa 	bool val;
503c68a2921SDaniel Kurtz 
504c3aa4742SSimon Xue 	if (iommu->reset_disabled)
505c3aa4742SSimon Xue 		return 0;
506c3aa4742SSimon Xue 
507c68a2921SDaniel Kurtz 	/*
508c68a2921SDaniel Kurtz 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
5096df63b7eSJonas Karlman 	 * and verifying that upper 5 (v1) or 7 (v2) nybbles are read back.
510c68a2921SDaniel Kurtz 	 */
511cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
512227014b3SBenjamin Gaignard 		dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
513227014b3SBenjamin Gaignard 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
514c68a2921SDaniel Kurtz 
515227014b3SBenjamin Gaignard 		if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) {
516c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
517c68a2921SDaniel Kurtz 			return -EFAULT;
518c68a2921SDaniel Kurtz 		}
519cd6438c5SZhengShunQian 	}
520c68a2921SDaniel Kurtz 
521c68a2921SDaniel Kurtz 	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
522c68a2921SDaniel Kurtz 
5230416bf64STomasz Figa 	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
5240416bf64STomasz Figa 				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
5250416bf64STomasz Figa 				 RK_MMU_POLL_TIMEOUT_US);
526cd6438c5SZhengShunQian 	if (ret) {
527c68a2921SDaniel Kurtz 		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
528c68a2921SDaniel Kurtz 		return ret;
529c68a2921SDaniel Kurtz 	}
530c68a2921SDaniel Kurtz 
531cd6438c5SZhengShunQian 	return 0;
532cd6438c5SZhengShunQian }
533cd6438c5SZhengShunQian 
534cd6438c5SZhengShunQian static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
535c68a2921SDaniel Kurtz {
536cd6438c5SZhengShunQian 	void __iomem *base = iommu->bases[index];
537c68a2921SDaniel Kurtz 	u32 dte_index, pte_index, page_offset;
538c68a2921SDaniel Kurtz 	u32 mmu_dte_addr;
539c68a2921SDaniel Kurtz 	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
540c68a2921SDaniel Kurtz 	u32 *dte_addr;
541c68a2921SDaniel Kurtz 	u32 dte;
542c68a2921SDaniel Kurtz 	phys_addr_t pte_addr_phys = 0;
543c68a2921SDaniel Kurtz 	u32 *pte_addr = NULL;
544c68a2921SDaniel Kurtz 	u32 pte = 0;
545c68a2921SDaniel Kurtz 	phys_addr_t page_addr_phys = 0;
546c68a2921SDaniel Kurtz 	u32 page_flags = 0;
547c68a2921SDaniel Kurtz 
548c68a2921SDaniel Kurtz 	dte_index = rk_iova_dte_index(iova);
549c68a2921SDaniel Kurtz 	pte_index = rk_iova_pte_index(iova);
550c68a2921SDaniel Kurtz 	page_offset = rk_iova_page_offset(iova);
551c68a2921SDaniel Kurtz 
552cd6438c5SZhengShunQian 	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
5536df63b7eSJonas Karlman 	mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr);
554c68a2921SDaniel Kurtz 
555c68a2921SDaniel Kurtz 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
556c68a2921SDaniel Kurtz 	dte_addr = phys_to_virt(dte_addr_phys);
557c68a2921SDaniel Kurtz 	dte = *dte_addr;
558c68a2921SDaniel Kurtz 
559c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte))
560c68a2921SDaniel Kurtz 		goto print_it;
561c68a2921SDaniel Kurtz 
562227014b3SBenjamin Gaignard 	pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
563c68a2921SDaniel Kurtz 	pte_addr = phys_to_virt(pte_addr_phys);
564c68a2921SDaniel Kurtz 	pte = *pte_addr;
565c68a2921SDaniel Kurtz 
566c68a2921SDaniel Kurtz 	if (!rk_pte_is_page_valid(pte))
567c68a2921SDaniel Kurtz 		goto print_it;
568c68a2921SDaniel Kurtz 
569227014b3SBenjamin Gaignard 	page_addr_phys = rk_ops->pt_address(pte) + page_offset;
570c68a2921SDaniel Kurtz 	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
571c68a2921SDaniel Kurtz 
572c68a2921SDaniel Kurtz print_it:
573c68a2921SDaniel Kurtz 	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
574c68a2921SDaniel Kurtz 		&iova, dte_index, pte_index, page_offset);
575c68a2921SDaniel Kurtz 	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
576c68a2921SDaniel Kurtz 		&mmu_dte_addr_phys, &dte_addr_phys, dte,
577c68a2921SDaniel Kurtz 		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
578c68a2921SDaniel Kurtz 		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
579c68a2921SDaniel Kurtz }
580c68a2921SDaniel Kurtz 
581c68a2921SDaniel Kurtz static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
582c68a2921SDaniel Kurtz {
583c68a2921SDaniel Kurtz 	struct rk_iommu *iommu = dev_id;
584c68a2921SDaniel Kurtz 	u32 status;
585c68a2921SDaniel Kurtz 	u32 int_status;
586c68a2921SDaniel Kurtz 	dma_addr_t iova;
587cd6438c5SZhengShunQian 	irqreturn_t ret = IRQ_NONE;
5883fc7c5c0SMarc Zyngier 	int i, err;
589c68a2921SDaniel Kurtz 
5903fc7c5c0SMarc Zyngier 	err = pm_runtime_get_if_in_use(iommu->dev);
5915b47748eSRobin Murphy 	if (!err || WARN_ON_ONCE(err < 0))
5923fc7c5c0SMarc Zyngier 		return ret;
5930f181d3cSJeffy Chen 
5940f181d3cSJeffy Chen 	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
5950f181d3cSJeffy Chen 		goto out;
596f2e3a5f5STomasz Figa 
597cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
598cd6438c5SZhengShunQian 		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
599c68a2921SDaniel Kurtz 		if (int_status == 0)
600cd6438c5SZhengShunQian 			continue;
601c68a2921SDaniel Kurtz 
602cd6438c5SZhengShunQian 		ret = IRQ_HANDLED;
603cd6438c5SZhengShunQian 		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
604c68a2921SDaniel Kurtz 
605c68a2921SDaniel Kurtz 		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
606c68a2921SDaniel Kurtz 			int flags;
607c68a2921SDaniel Kurtz 
608cd6438c5SZhengShunQian 			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
609c68a2921SDaniel Kurtz 			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
610c68a2921SDaniel Kurtz 					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
611c68a2921SDaniel Kurtz 
612c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
613c68a2921SDaniel Kurtz 				&iova,
614c68a2921SDaniel Kurtz 				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
615c68a2921SDaniel Kurtz 
616cd6438c5SZhengShunQian 			log_iova(iommu, i, iova);
617c68a2921SDaniel Kurtz 
618c68a2921SDaniel Kurtz 			/*
619c68a2921SDaniel Kurtz 			 * Report page fault to any installed handlers.
620c68a2921SDaniel Kurtz 			 * Ignore the return code, though, since we always zap cache
621c68a2921SDaniel Kurtz 			 * and clear the page fault anyway.
622c68a2921SDaniel Kurtz 			 */
62325c23255SSteven Price 			if (iommu->domain != &rk_identity_domain)
624c68a2921SDaniel Kurtz 				report_iommu_fault(iommu->domain, iommu->dev, iova,
625c68a2921SDaniel Kurtz 						   flags);
626c68a2921SDaniel Kurtz 			else
627c68a2921SDaniel Kurtz 				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
628c68a2921SDaniel Kurtz 
629cd6438c5SZhengShunQian 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
630cd6438c5SZhengShunQian 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
631c68a2921SDaniel Kurtz 		}
632c68a2921SDaniel Kurtz 
633c68a2921SDaniel Kurtz 		if (int_status & RK_MMU_IRQ_BUS_ERROR)
634c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
635c68a2921SDaniel Kurtz 
636c68a2921SDaniel Kurtz 		if (int_status & ~RK_MMU_IRQ_MASK)
637c68a2921SDaniel Kurtz 			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
638c68a2921SDaniel Kurtz 				int_status);
639c68a2921SDaniel Kurtz 
640cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
641cd6438c5SZhengShunQian 	}
642c68a2921SDaniel Kurtz 
643f2e3a5f5STomasz Figa 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
644f2e3a5f5STomasz Figa 
6450f181d3cSJeffy Chen out:
6460f181d3cSJeffy Chen 	pm_runtime_put(iommu->dev);
647cd6438c5SZhengShunQian 	return ret;
648c68a2921SDaniel Kurtz }
649c68a2921SDaniel Kurtz 
650c68a2921SDaniel Kurtz static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
651c68a2921SDaniel Kurtz 					 dma_addr_t iova)
652c68a2921SDaniel Kurtz {
653bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
654c68a2921SDaniel Kurtz 	unsigned long flags;
655c68a2921SDaniel Kurtz 	phys_addr_t pt_phys, phys = 0;
656c68a2921SDaniel Kurtz 	u32 dte, pte;
657c68a2921SDaniel Kurtz 	u32 *page_table;
658c68a2921SDaniel Kurtz 
659c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
660c68a2921SDaniel Kurtz 
661c68a2921SDaniel Kurtz 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
662c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte))
663c68a2921SDaniel Kurtz 		goto out;
664c68a2921SDaniel Kurtz 
665227014b3SBenjamin Gaignard 	pt_phys = rk_ops->pt_address(dte);
666c68a2921SDaniel Kurtz 	page_table = (u32 *)phys_to_virt(pt_phys);
667c68a2921SDaniel Kurtz 	pte = page_table[rk_iova_pte_index(iova)];
668c68a2921SDaniel Kurtz 	if (!rk_pte_is_page_valid(pte))
669c68a2921SDaniel Kurtz 		goto out;
670c68a2921SDaniel Kurtz 
671227014b3SBenjamin Gaignard 	phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
672c68a2921SDaniel Kurtz out:
673c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
674c68a2921SDaniel Kurtz 
675c68a2921SDaniel Kurtz 	return phys;
676c68a2921SDaniel Kurtz }
677c68a2921SDaniel Kurtz 
678c68a2921SDaniel Kurtz static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
679c68a2921SDaniel Kurtz 			      dma_addr_t iova, size_t size)
680c68a2921SDaniel Kurtz {
681c68a2921SDaniel Kurtz 	struct list_head *pos;
682c68a2921SDaniel Kurtz 	unsigned long flags;
683c68a2921SDaniel Kurtz 
684c68a2921SDaniel Kurtz 	/* shoot down this iova range on all iommus using this domain */
685c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
686c68a2921SDaniel Kurtz 	list_for_each(pos, &rk_domain->iommus) {
687c68a2921SDaniel Kurtz 		struct rk_iommu *iommu;
6883fc7c5c0SMarc Zyngier 		int ret;
6890f181d3cSJeffy Chen 
690c68a2921SDaniel Kurtz 		iommu = list_entry(pos, struct rk_iommu, node);
6910f181d3cSJeffy Chen 
6920f181d3cSJeffy Chen 		/* Only zap TLBs of IOMMUs that are powered on. */
6933fc7c5c0SMarc Zyngier 		ret = pm_runtime_get_if_in_use(iommu->dev);
6943fc7c5c0SMarc Zyngier 		if (WARN_ON_ONCE(ret < 0))
6953fc7c5c0SMarc Zyngier 			continue;
6963fc7c5c0SMarc Zyngier 		if (ret) {
6970f181d3cSJeffy Chen 			WARN_ON(clk_bulk_enable(iommu->num_clocks,
6980f181d3cSJeffy Chen 						iommu->clocks));
699c68a2921SDaniel Kurtz 			rk_iommu_zap_lines(iommu, iova, size);
700f2e3a5f5STomasz Figa 			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
7010f181d3cSJeffy Chen 			pm_runtime_put(iommu->dev);
7020f181d3cSJeffy Chen 		}
703c68a2921SDaniel Kurtz 	}
704c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
705c68a2921SDaniel Kurtz }
706c68a2921SDaniel Kurtz 
707d4dd920cSTomasz Figa static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
708d4dd920cSTomasz Figa 					 dma_addr_t iova, size_t size)
709d4dd920cSTomasz Figa {
710d4dd920cSTomasz Figa 	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
711d4dd920cSTomasz Figa 	if (size > SPAGE_SIZE)
712d4dd920cSTomasz Figa 		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
713d4dd920cSTomasz Figa 					SPAGE_SIZE);
714d4dd920cSTomasz Figa }
715d4dd920cSTomasz Figa 
716c68a2921SDaniel Kurtz static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
717c68a2921SDaniel Kurtz 				  dma_addr_t iova)
718c68a2921SDaniel Kurtz {
719c68a2921SDaniel Kurtz 	u32 *page_table, *dte_addr;
7204f0aba67SShunqian Zheng 	u32 dte_index, dte;
721c68a2921SDaniel Kurtz 	phys_addr_t pt_phys;
7224f0aba67SShunqian Zheng 	dma_addr_t pt_dma;
723c68a2921SDaniel Kurtz 
724c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
725c68a2921SDaniel Kurtz 
7264f0aba67SShunqian Zheng 	dte_index = rk_iova_dte_index(iova);
7274f0aba67SShunqian Zheng 	dte_addr = &rk_domain->dt[dte_index];
728c68a2921SDaniel Kurtz 	dte = *dte_addr;
729c68a2921SDaniel Kurtz 	if (rk_dte_is_pt_valid(dte))
730c68a2921SDaniel Kurtz 		goto done;
731c68a2921SDaniel Kurtz 
732*5404ccaaSPasha Tatashin 	page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
733c68a2921SDaniel Kurtz 	if (!page_table)
734c68a2921SDaniel Kurtz 		return ERR_PTR(-ENOMEM);
735c68a2921SDaniel Kurtz 
7369176a303SJeffy Chen 	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
7379176a303SJeffy Chen 	if (dma_mapping_error(dma_dev, pt_dma)) {
7389176a303SJeffy Chen 		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
739*5404ccaaSPasha Tatashin 		iommu_free_page(page_table);
7404f0aba67SShunqian Zheng 		return ERR_PTR(-ENOMEM);
7414f0aba67SShunqian Zheng 	}
7424f0aba67SShunqian Zheng 
743227014b3SBenjamin Gaignard 	dte = rk_ops->mk_dtentries(pt_dma);
744c68a2921SDaniel Kurtz 	*dte_addr = dte;
745c68a2921SDaniel Kurtz 
7464f0aba67SShunqian Zheng 	rk_table_flush(rk_domain,
7474f0aba67SShunqian Zheng 		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
748c68a2921SDaniel Kurtz done:
749227014b3SBenjamin Gaignard 	pt_phys = rk_ops->pt_address(dte);
750c68a2921SDaniel Kurtz 	return (u32 *)phys_to_virt(pt_phys);
751c68a2921SDaniel Kurtz }
752c68a2921SDaniel Kurtz 
753c68a2921SDaniel Kurtz static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
7544f0aba67SShunqian Zheng 				  u32 *pte_addr, dma_addr_t pte_dma,
7554f0aba67SShunqian Zheng 				  size_t size)
756c68a2921SDaniel Kurtz {
757c68a2921SDaniel Kurtz 	unsigned int pte_count;
758c68a2921SDaniel Kurtz 	unsigned int pte_total = size / SPAGE_SIZE;
759c68a2921SDaniel Kurtz 
760c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
761c68a2921SDaniel Kurtz 
762c68a2921SDaniel Kurtz 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
763c68a2921SDaniel Kurtz 		u32 pte = pte_addr[pte_count];
764c68a2921SDaniel Kurtz 		if (!rk_pte_is_page_valid(pte))
765c68a2921SDaniel Kurtz 			break;
766c68a2921SDaniel Kurtz 
767c68a2921SDaniel Kurtz 		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
768c68a2921SDaniel Kurtz 	}
769c68a2921SDaniel Kurtz 
7704f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pte_dma, pte_count);
771c68a2921SDaniel Kurtz 
772c68a2921SDaniel Kurtz 	return pte_count * SPAGE_SIZE;
773c68a2921SDaniel Kurtz }
774c68a2921SDaniel Kurtz 
775c68a2921SDaniel Kurtz static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
7764f0aba67SShunqian Zheng 			     dma_addr_t pte_dma, dma_addr_t iova,
7774f0aba67SShunqian Zheng 			     phys_addr_t paddr, size_t size, int prot)
778c68a2921SDaniel Kurtz {
779c68a2921SDaniel Kurtz 	unsigned int pte_count;
780c68a2921SDaniel Kurtz 	unsigned int pte_total = size / SPAGE_SIZE;
781c68a2921SDaniel Kurtz 	phys_addr_t page_phys;
782c68a2921SDaniel Kurtz 
783c68a2921SDaniel Kurtz 	assert_spin_locked(&rk_domain->dt_lock);
784c68a2921SDaniel Kurtz 
785c68a2921SDaniel Kurtz 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
786c68a2921SDaniel Kurtz 		u32 pte = pte_addr[pte_count];
787c68a2921SDaniel Kurtz 
788c68a2921SDaniel Kurtz 		if (rk_pte_is_page_valid(pte))
789c68a2921SDaniel Kurtz 			goto unwind;
790c68a2921SDaniel Kurtz 
791227014b3SBenjamin Gaignard 		pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);
792c68a2921SDaniel Kurtz 
793c68a2921SDaniel Kurtz 		paddr += SPAGE_SIZE;
794c68a2921SDaniel Kurtz 	}
795c68a2921SDaniel Kurtz 
7964f0aba67SShunqian Zheng 	rk_table_flush(rk_domain, pte_dma, pte_total);
797c68a2921SDaniel Kurtz 
798d4dd920cSTomasz Figa 	/*
799d4dd920cSTomasz Figa 	 * Zap the first and last iova to evict from iotlb any previously
800d4dd920cSTomasz Figa 	 * mapped cachelines holding stale values for its dte and pte.
801d4dd920cSTomasz Figa 	 * We only zap the first and last iova, since only they could have
802d4dd920cSTomasz Figa 	 * dte or pte shared with an existing mapping.
803d4dd920cSTomasz Figa 	 */
804d4dd920cSTomasz Figa 	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
805d4dd920cSTomasz Figa 
806c68a2921SDaniel Kurtz 	return 0;
807c68a2921SDaniel Kurtz unwind:
808c68a2921SDaniel Kurtz 	/* Unmap the range of iovas that we just mapped */
8094f0aba67SShunqian Zheng 	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
8104f0aba67SShunqian Zheng 			    pte_count * SPAGE_SIZE);
811c68a2921SDaniel Kurtz 
812c68a2921SDaniel Kurtz 	iova += pte_count * SPAGE_SIZE;
813227014b3SBenjamin Gaignard 	page_phys = rk_ops->pt_address(pte_addr[pte_count]);
814c68a2921SDaniel Kurtz 	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
815c68a2921SDaniel Kurtz 	       &iova, &page_phys, &paddr, prot);
816c68a2921SDaniel Kurtz 
817c68a2921SDaniel Kurtz 	return -EADDRINUSE;
818c68a2921SDaniel Kurtz }
819c68a2921SDaniel Kurtz 
820c68a2921SDaniel Kurtz static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
821d47b9777SRobin Murphy 			phys_addr_t paddr, size_t size, size_t count,
822d47b9777SRobin Murphy 			int prot, gfp_t gfp, size_t *mapped)
823c68a2921SDaniel Kurtz {
824bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
825c68a2921SDaniel Kurtz 	unsigned long flags;
8264f0aba67SShunqian Zheng 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
827c68a2921SDaniel Kurtz 	u32 *page_table, *pte_addr;
8284f0aba67SShunqian Zheng 	u32 dte_index, pte_index;
829c68a2921SDaniel Kurtz 	int ret;
830c68a2921SDaniel Kurtz 
831c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
832c68a2921SDaniel Kurtz 
833c68a2921SDaniel Kurtz 	/*
834c68a2921SDaniel Kurtz 	 * pgsize_bitmap specifies iova sizes that fit in one page table
835c68a2921SDaniel Kurtz 	 * (1024 4-KiB pages = 4 MiB).
836c68a2921SDaniel Kurtz 	 * So, size will always be 4096 <= size <= 4194304.
837c68a2921SDaniel Kurtz 	 * Since iommu_map() guarantees that both iova and size will be
838c68a2921SDaniel Kurtz 	 * aligned, we will always only be mapping from a single dte here.
839c68a2921SDaniel Kurtz 	 */
840c68a2921SDaniel Kurtz 	page_table = rk_dte_get_page_table(rk_domain, iova);
841c68a2921SDaniel Kurtz 	if (IS_ERR(page_table)) {
842c68a2921SDaniel Kurtz 		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
843c68a2921SDaniel Kurtz 		return PTR_ERR(page_table);
844c68a2921SDaniel Kurtz 	}
845c68a2921SDaniel Kurtz 
8464f0aba67SShunqian Zheng 	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
8474f0aba67SShunqian Zheng 	pte_index = rk_iova_pte_index(iova);
8484f0aba67SShunqian Zheng 	pte_addr = &page_table[pte_index];
849227014b3SBenjamin Gaignard 
850227014b3SBenjamin Gaignard 	pte_dma = rk_ops->pt_address(dte_index) + pte_index * sizeof(u32);
8514f0aba67SShunqian Zheng 	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
8524f0aba67SShunqian Zheng 				paddr, size, prot);
8534f0aba67SShunqian Zheng 
854c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
855d47b9777SRobin Murphy 	if (!ret)
856d47b9777SRobin Murphy 		*mapped = size;
857c68a2921SDaniel Kurtz 
858c68a2921SDaniel Kurtz 	return ret;
859c68a2921SDaniel Kurtz }
860c68a2921SDaniel Kurtz 
861c68a2921SDaniel Kurtz static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
862d47b9777SRobin Murphy 			     size_t size, size_t count, struct iommu_iotlb_gather *gather)
863c68a2921SDaniel Kurtz {
864bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
865c68a2921SDaniel Kurtz 	unsigned long flags;
8664f0aba67SShunqian Zheng 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
867c68a2921SDaniel Kurtz 	phys_addr_t pt_phys;
868c68a2921SDaniel Kurtz 	u32 dte;
869c68a2921SDaniel Kurtz 	u32 *pte_addr;
870c68a2921SDaniel Kurtz 	size_t unmap_size;
871c68a2921SDaniel Kurtz 
872c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
873c68a2921SDaniel Kurtz 
874c68a2921SDaniel Kurtz 	/*
875c68a2921SDaniel Kurtz 	 * pgsize_bitmap specifies iova sizes that fit in one page table
876c68a2921SDaniel Kurtz 	 * (1024 4-KiB pages = 4 MiB).
877c68a2921SDaniel Kurtz 	 * So, size will always be 4096 <= size <= 4194304.
878c68a2921SDaniel Kurtz 	 * Since iommu_unmap() guarantees that both iova and size will be
879c68a2921SDaniel Kurtz 	 * aligned, we will always only be unmapping from a single dte here.
880c68a2921SDaniel Kurtz 	 */
881c68a2921SDaniel Kurtz 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
882c68a2921SDaniel Kurtz 	/* Just return 0 if iova is unmapped */
883c68a2921SDaniel Kurtz 	if (!rk_dte_is_pt_valid(dte)) {
884c68a2921SDaniel Kurtz 		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
885c68a2921SDaniel Kurtz 		return 0;
886c68a2921SDaniel Kurtz 	}
887c68a2921SDaniel Kurtz 
888227014b3SBenjamin Gaignard 	pt_phys = rk_ops->pt_address(dte);
889c68a2921SDaniel Kurtz 	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
8904f0aba67SShunqian Zheng 	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
8914f0aba67SShunqian Zheng 	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
892c68a2921SDaniel Kurtz 
893c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
894c68a2921SDaniel Kurtz 
895c68a2921SDaniel Kurtz 	/* Shootdown iotlb entries for iova range that was just unmapped */
896c68a2921SDaniel Kurtz 	rk_iommu_zap_iova(rk_domain, iova, unmap_size);
897c68a2921SDaniel Kurtz 
898c68a2921SDaniel Kurtz 	return unmap_size;
899c68a2921SDaniel Kurtz }
900c68a2921SDaniel Kurtz 
901c68a2921SDaniel Kurtz static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
902c68a2921SDaniel Kurtz {
9038b9cc3b7SJoerg Roedel 	struct rk_iommudata *data = dev_iommu_priv_get(dev);
904c68a2921SDaniel Kurtz 
9055fd577c3SJeffy Chen 	return data ? data->iommu : NULL;
906c68a2921SDaniel Kurtz }
907c68a2921SDaniel Kurtz 
9080f181d3cSJeffy Chen /* Must be called with iommu powered on and attached */
9090f181d3cSJeffy Chen static void rk_iommu_disable(struct rk_iommu *iommu)
910c68a2921SDaniel Kurtz {
9110f181d3cSJeffy Chen 	int i;
912c68a2921SDaniel Kurtz 
9130f181d3cSJeffy Chen 	/* Ignore error while disabling, just keep going */
9140f181d3cSJeffy Chen 	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
9150f181d3cSJeffy Chen 	rk_iommu_enable_stall(iommu);
9160f181d3cSJeffy Chen 	rk_iommu_disable_paging(iommu);
9170f181d3cSJeffy Chen 	for (i = 0; i < iommu->num_mmu; i++) {
9180f181d3cSJeffy Chen 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
9190f181d3cSJeffy Chen 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
9200f181d3cSJeffy Chen 	}
9210f181d3cSJeffy Chen 	rk_iommu_disable_stall(iommu);
9220f181d3cSJeffy Chen 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
9230f181d3cSJeffy Chen }
9240f181d3cSJeffy Chen 
9250f181d3cSJeffy Chen /* Must be called with iommu powered on and attached */
9260f181d3cSJeffy Chen static int rk_iommu_enable(struct rk_iommu *iommu)
9270f181d3cSJeffy Chen {
9280f181d3cSJeffy Chen 	struct iommu_domain *domain = iommu->domain;
9290f181d3cSJeffy Chen 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
9300f181d3cSJeffy Chen 	int ret, i;
931c68a2921SDaniel Kurtz 
932f2e3a5f5STomasz Figa 	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
933c68a2921SDaniel Kurtz 	if (ret)
934c68a2921SDaniel Kurtz 		return ret;
935c68a2921SDaniel Kurtz 
936f2e3a5f5STomasz Figa 	ret = rk_iommu_enable_stall(iommu);
937f2e3a5f5STomasz Figa 	if (ret)
938f2e3a5f5STomasz Figa 		goto out_disable_clocks;
939f2e3a5f5STomasz Figa 
940c68a2921SDaniel Kurtz 	ret = rk_iommu_force_reset(iommu);
941c68a2921SDaniel Kurtz 	if (ret)
942f6717d72STomasz Figa 		goto out_disable_stall;
943c68a2921SDaniel Kurtz 
944cd6438c5SZhengShunQian 	for (i = 0; i < iommu->num_mmu; i++) {
9454f0aba67SShunqian Zheng 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
9466df63b7eSJonas Karlman 			       rk_ops->mk_dtentries(rk_domain->dt_dma));
947ae8a7910SJohn Keeping 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
948cd6438c5SZhengShunQian 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
949cd6438c5SZhengShunQian 	}
950c68a2921SDaniel Kurtz 
951c68a2921SDaniel Kurtz 	ret = rk_iommu_enable_paging(iommu);
952c68a2921SDaniel Kurtz 
953f6717d72STomasz Figa out_disable_stall:
954c68a2921SDaniel Kurtz 	rk_iommu_disable_stall(iommu);
955f2e3a5f5STomasz Figa out_disable_clocks:
956f2e3a5f5STomasz Figa 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
957f6717d72STomasz Figa 	return ret;
958c68a2921SDaniel Kurtz }
959c68a2921SDaniel Kurtz 
96025c23255SSteven Price static int rk_iommu_identity_attach(struct iommu_domain *identity_domain,
961c68a2921SDaniel Kurtz 				    struct device *dev)
962c68a2921SDaniel Kurtz {
963c68a2921SDaniel Kurtz 	struct rk_iommu *iommu;
96425c23255SSteven Price 	struct rk_iommu_domain *rk_domain;
965c68a2921SDaniel Kurtz 	unsigned long flags;
9663fc7c5c0SMarc Zyngier 	int ret;
967c68a2921SDaniel Kurtz 
968c68a2921SDaniel Kurtz 	/* Allow 'virtual devices' (e.g. drm) to detach from domain */
969c68a2921SDaniel Kurtz 	iommu = rk_iommu_from_dev(dev);
970c68a2921SDaniel Kurtz 	if (!iommu)
97125c23255SSteven Price 		return -ENODEV;
97225c23255SSteven Price 
97325c23255SSteven Price 	rk_domain = to_rk_domain(iommu->domain);
974c68a2921SDaniel Kurtz 
9750f181d3cSJeffy Chen 	dev_dbg(dev, "Detaching from iommu domain\n");
9760f181d3cSJeffy Chen 
97725c23255SSteven Price 	if (iommu->domain == identity_domain)
97825c23255SSteven Price 		return 0;
9790f181d3cSJeffy Chen 
98025c23255SSteven Price 	iommu->domain = identity_domain;
9810f181d3cSJeffy Chen 
982c68a2921SDaniel Kurtz 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
983c68a2921SDaniel Kurtz 	list_del_init(&iommu->node);
984c68a2921SDaniel Kurtz 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
985c68a2921SDaniel Kurtz 
9863fc7c5c0SMarc Zyngier 	ret = pm_runtime_get_if_in_use(iommu->dev);
9873fc7c5c0SMarc Zyngier 	WARN_ON_ONCE(ret < 0);
9883fc7c5c0SMarc Zyngier 	if (ret > 0) {
9890f181d3cSJeffy Chen 		rk_iommu_disable(iommu);
9900f181d3cSJeffy Chen 		pm_runtime_put(iommu->dev);
991cd6438c5SZhengShunQian 	}
99225c23255SSteven Price 
99325c23255SSteven Price 	return 0;
9940f181d3cSJeffy Chen }
995c68a2921SDaniel Kurtz 
99625c23255SSteven Price static struct iommu_domain_ops rk_identity_ops = {
99725c23255SSteven Price 	.attach_dev = rk_iommu_identity_attach,
99825c23255SSteven Price };
99925c23255SSteven Price 
100025c23255SSteven Price static struct iommu_domain rk_identity_domain = {
100125c23255SSteven Price 	.type = IOMMU_DOMAIN_IDENTITY,
100225c23255SSteven Price 	.ops = &rk_identity_ops,
100325c23255SSteven Price };
100425c23255SSteven Price 
10050f181d3cSJeffy Chen static int rk_iommu_attach_device(struct iommu_domain *domain,
10060f181d3cSJeffy Chen 		struct device *dev)
10070f181d3cSJeffy Chen {
10080f181d3cSJeffy Chen 	struct rk_iommu *iommu;
10090f181d3cSJeffy Chen 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
10100f181d3cSJeffy Chen 	unsigned long flags;
10110f181d3cSJeffy Chen 	int ret;
1012c68a2921SDaniel Kurtz 
10130f181d3cSJeffy Chen 	/*
10140f181d3cSJeffy Chen 	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
10150f181d3cSJeffy Chen 	 * Such a device does not belong to an iommu group.
10160f181d3cSJeffy Chen 	 */
10170f181d3cSJeffy Chen 	iommu = rk_iommu_from_dev(dev);
10180f181d3cSJeffy Chen 	if (!iommu)
10190f181d3cSJeffy Chen 		return 0;
10200f181d3cSJeffy Chen 
10210f181d3cSJeffy Chen 	dev_dbg(dev, "Attaching to iommu domain\n");
10220f181d3cSJeffy Chen 
10230f181d3cSJeffy Chen 	/* iommu already attached */
10240f181d3cSJeffy Chen 	if (iommu->domain == domain)
10250f181d3cSJeffy Chen 		return 0;
10260f181d3cSJeffy Chen 
102725c23255SSteven Price 	ret = rk_iommu_identity_attach(&rk_identity_domain, dev);
102825c23255SSteven Price 	if (ret)
102925c23255SSteven Price 		return ret;
10300f181d3cSJeffy Chen 
10310f181d3cSJeffy Chen 	iommu->domain = domain;
10320f181d3cSJeffy Chen 
10330f181d3cSJeffy Chen 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
10340f181d3cSJeffy Chen 	list_add_tail(&iommu->node, &rk_domain->iommus);
10350f181d3cSJeffy Chen 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
10360f181d3cSJeffy Chen 
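	/*
	 * If the IOMMU is not powered right now there is nothing to program;
	 * rk_iommu_resume() will enable the new domain the next time the
	 * attached master powers the IOMMU up.
	 */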
10373fc7c5c0SMarc Zyngier 	ret = pm_runtime_get_if_in_use(iommu->dev);
10383fc7c5c0SMarc Zyngier 	if (!ret || WARN_ON_ONCE(ret < 0))
10390f181d3cSJeffy Chen 		return 0;
10400f181d3cSJeffy Chen 
10410f181d3cSJeffy Chen 	ret = rk_iommu_enable(iommu);
10420f181d3cSJeffy Chen 	if (ret)
104325c23255SSteven Price 		WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev));
10440f181d3cSJeffy Chen 
10450f181d3cSJeffy Chen 	pm_runtime_put(iommu->dev);
10460f181d3cSJeffy Chen 
10470f181d3cSJeffy Chen 	return ret;
1048c68a2921SDaniel Kurtz }
1049c68a2921SDaniel Kurtz 
10503529375eSJason Gunthorpe static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
1051c68a2921SDaniel Kurtz {
1052c68a2921SDaniel Kurtz 	struct rk_iommu_domain *rk_domain;
1053c68a2921SDaniel Kurtz 
10549176a303SJeffy Chen 	if (!dma_dev)
1055bcd516a3SJoerg Roedel 		return NULL;
1056c68a2921SDaniel Kurtz 
105742bb97b8SEzequiel Garcia 	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
10584f0aba67SShunqian Zheng 	if (!rk_domain)
10599176a303SJeffy Chen 		return NULL;
10604f0aba67SShunqian Zheng 
1061c68a2921SDaniel Kurtz 	/*
1062c68a2921SDaniel Kurtz 	 * rk32xx iommus use a 2 level pagetable.
1063c68a2921SDaniel Kurtz 	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
1064c68a2921SDaniel Kurtz 	 * Allocate one 4 KiB page for each table.
1065c68a2921SDaniel Kurtz 	 */
1066*5404ccaaSPasha Tatashin 	rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
1067c68a2921SDaniel Kurtz 	if (!rk_domain->dt)
1068b811a451SRobin Murphy 		goto err_free_domain;
1069c68a2921SDaniel Kurtz 
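	/*
	 * The directory table is walked by the IOMMU hardware, so map it for
	 * the DMA device: CPU updates can then be flushed out to memory with
	 * the streaming DMA API before the hardware walks the tables.
	 */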
10709176a303SJeffy Chen 	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
10714f0aba67SShunqian Zheng 					   SPAGE_SIZE, DMA_TO_DEVICE);
10729176a303SJeffy Chen 	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
10739176a303SJeffy Chen 		dev_err(dma_dev, "DMA map error for DT\n");
10744f0aba67SShunqian Zheng 		goto err_free_dt;
10754f0aba67SShunqian Zheng 	}
10764f0aba67SShunqian Zheng 
1077c68a2921SDaniel Kurtz 	spin_lock_init(&rk_domain->iommus_lock);
1078c68a2921SDaniel Kurtz 	spin_lock_init(&rk_domain->dt_lock);
1079c68a2921SDaniel Kurtz 	INIT_LIST_HEAD(&rk_domain->iommus);
1080c68a2921SDaniel Kurtz 
1081a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.aperture_start = 0;
1082a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
1083a93db2f2SShunqian Zheng 	rk_domain->domain.geometry.force_aperture = true;
1084a93db2f2SShunqian Zheng 
1085bcd516a3SJoerg Roedel 	return &rk_domain->domain;
1086c68a2921SDaniel Kurtz 
10874f0aba67SShunqian Zheng err_free_dt:
1088*5404ccaaSPasha Tatashin 	iommu_free_page(rk_domain->dt);
108942bb97b8SEzequiel Garcia err_free_domain:
109042bb97b8SEzequiel Garcia 	kfree(rk_domain);
10914f0aba67SShunqian Zheng 
1092bcd516a3SJoerg Roedel 	return NULL;
1093c68a2921SDaniel Kurtz }
1094c68a2921SDaniel Kurtz 
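/*
 * Freeing a domain walks every valid directory entry, unmaps and frees the
 * corresponding page table, and finally unmaps and frees the directory itself.
 */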
1095bcd516a3SJoerg Roedel static void rk_iommu_domain_free(struct iommu_domain *domain)
1096c68a2921SDaniel Kurtz {
1097bcd516a3SJoerg Roedel 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1098c68a2921SDaniel Kurtz 	int i;
1099c68a2921SDaniel Kurtz 
1100c68a2921SDaniel Kurtz 	WARN_ON(!list_empty(&rk_domain->iommus));
1101c68a2921SDaniel Kurtz 
1102c68a2921SDaniel Kurtz 	for (i = 0; i < NUM_DT_ENTRIES; i++) {
1103c68a2921SDaniel Kurtz 		u32 dte = rk_domain->dt[i];
1104c68a2921SDaniel Kurtz 		if (rk_dte_is_pt_valid(dte)) {
1105227014b3SBenjamin Gaignard 			phys_addr_t pt_phys = rk_ops->pt_address(dte);
1106c68a2921SDaniel Kurtz 			u32 *page_table = phys_to_virt(pt_phys);
11079176a303SJeffy Chen 			dma_unmap_single(dma_dev, pt_phys,
11084f0aba67SShunqian Zheng 					 SPAGE_SIZE, DMA_TO_DEVICE);
1109*5404ccaaSPasha Tatashin 			iommu_free_page(page_table);
1110c68a2921SDaniel Kurtz 		}
1111c68a2921SDaniel Kurtz 	}
1112c68a2921SDaniel Kurtz 
11139176a303SJeffy Chen 	dma_unmap_single(dma_dev, rk_domain->dt_dma,
11144f0aba67SShunqian Zheng 			 SPAGE_SIZE, DMA_TO_DEVICE);
1115*5404ccaaSPasha Tatashin 	iommu_free_page(rk_domain->dt);
11164f0aba67SShunqian Zheng 
111742bb97b8SEzequiel Garcia 	kfree(rk_domain);
1118c68a2921SDaniel Kurtz }
1119c68a2921SDaniel Kurtz 
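/*
 * The per-device data set up in rk_iommu_of_xlate() identifies the owning
 * IOMMU; probe_device() then adds a stateless device link so the IOMMU's
 * runtime PM state follows its master.
 */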
1120d8260443SJoerg Roedel static struct iommu_device *rk_iommu_probe_device(struct device *dev)
1121c68a2921SDaniel Kurtz {
11220f181d3cSJeffy Chen 	struct rk_iommudata *data;
1123d8260443SJoerg Roedel 	struct rk_iommu *iommu;
11240f181d3cSJeffy Chen 
11258b9cc3b7SJoerg Roedel 	data = dev_iommu_priv_get(dev);
11260f181d3cSJeffy Chen 	if (!data)
1127d8260443SJoerg Roedel 		return ERR_PTR(-ENODEV);
1128c68a2921SDaniel Kurtz 
1129c9d9f239SJoerg Roedel 	iommu = rk_iommu_from_dev(dev);
11305fd577c3SJeffy Chen 
1131ea4f6400SRafael J. Wysocki 	data->link = device_link_add(dev, iommu->dev,
1132ea4f6400SRafael J. Wysocki 				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
1133c9d9f239SJoerg Roedel 
1134d8260443SJoerg Roedel 	return &iommu->iommu;
1135c68a2921SDaniel Kurtz }
1136c68a2921SDaniel Kurtz 
1137d8260443SJoerg Roedel static void rk_iommu_release_device(struct device *dev)
1138c68a2921SDaniel Kurtz {
11398b9cc3b7SJoerg Roedel 	struct rk_iommudata *data = dev_iommu_priv_get(dev);
1140c9d9f239SJoerg Roedel 
11410f181d3cSJeffy Chen 	device_link_del(data->link);
1142c68a2921SDaniel Kurtz }
1143c68a2921SDaniel Kurtz 
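/*
 * Called for each "iommus" phandle in a master's DT node; records which
 * rk_iommu instance serves the device so that probe_device() can use it.
 */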
11445fd577c3SJeffy Chen static int rk_iommu_of_xlate(struct device *dev,
1145b42a905bSKrzysztof Kozlowski 			     const struct of_phandle_args *args)
11465fd577c3SJeffy Chen {
11475fd577c3SJeffy Chen 	struct platform_device *iommu_dev;
11485fd577c3SJeffy Chen 	struct rk_iommudata *data;
11495fd577c3SJeffy Chen 
11505fd577c3SJeffy Chen 	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
11515fd577c3SJeffy Chen 	if (!data)
11525fd577c3SJeffy Chen 		return -ENOMEM;
11535fd577c3SJeffy Chen 
11545fd577c3SJeffy Chen 	iommu_dev = of_find_device_by_node(args->np);
11555fd577c3SJeffy Chen 
11565fd577c3SJeffy Chen 	data->iommu = platform_get_drvdata(iommu_dev);
115725c23255SSteven Price 	data->iommu->domain = &rk_identity_domain;
11588b9cc3b7SJoerg Roedel 	dev_iommu_priv_set(dev, data);
11595fd577c3SJeffy Chen 
116040fa84e1SArnd Bergmann 	platform_device_put(iommu_dev);
11615fd577c3SJeffy Chen 
11625fd577c3SJeffy Chen 	return 0;
11635fd577c3SJeffy Chen }
11645fd577c3SJeffy Chen 
1165c68a2921SDaniel Kurtz static const struct iommu_ops rk_iommu_ops = {
1166df31b298SJason Gunthorpe 	.identity_domain = &rk_identity_domain,
11673529375eSJason Gunthorpe 	.domain_alloc_paging = rk_iommu_domain_alloc_paging,
11689a630a4bSLu Baolu 	.probe_device = rk_iommu_probe_device,
11699a630a4bSLu Baolu 	.release_device = rk_iommu_release_device,
1170ef0f48c6SJason Gunthorpe 	.device_group = generic_single_device_group,
11719a630a4bSLu Baolu 	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
11729a630a4bSLu Baolu 	.of_xlate = rk_iommu_of_xlate,
11739a630a4bSLu Baolu 	.default_domain_ops = &(const struct iommu_domain_ops) {
1174c68a2921SDaniel Kurtz 		.attach_dev	= rk_iommu_attach_device,
1175d47b9777SRobin Murphy 		.map_pages	= rk_iommu_map,
1176d47b9777SRobin Murphy 		.unmap_pages	= rk_iommu_unmap,
1177c68a2921SDaniel Kurtz 		.iova_to_phys	= rk_iommu_iova_to_phys,
11789a630a4bSLu Baolu 		.free		= rk_iommu_domain_free,
11799a630a4bSLu Baolu 	}
1180c68a2921SDaniel Kurtz };
1181c68a2921SDaniel Kurtz 
1182c68a2921SDaniel Kurtz static int rk_iommu_probe(struct platform_device *pdev)
1183c68a2921SDaniel Kurtz {
1184c68a2921SDaniel Kurtz 	struct device *dev = &pdev->dev;
1185c68a2921SDaniel Kurtz 	struct rk_iommu *iommu;
1186c68a2921SDaniel Kurtz 	struct resource *res;
1187227014b3SBenjamin Gaignard 	const struct rk_iommu_ops *ops;
11883d08f434SShunqian Zheng 	int num_res = pdev->num_resources;
1189f9258156SHeiko Stuebner 	int err, i;
1190c68a2921SDaniel Kurtz 
1191c68a2921SDaniel Kurtz 	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
1192c68a2921SDaniel Kurtz 	if (!iommu)
1193c68a2921SDaniel Kurtz 		return -ENOMEM;
1194c68a2921SDaniel Kurtz 
1195c68a2921SDaniel Kurtz 	platform_set_drvdata(pdev, iommu);
1196c68a2921SDaniel Kurtz 	iommu->dev = dev;
1197cd6438c5SZhengShunQian 	iommu->num_mmu = 0;
11983d08f434SShunqian Zheng 
1199227014b3SBenjamin Gaignard 	ops = of_device_get_match_data(dev);
1200227014b3SBenjamin Gaignard 	if (!rk_ops)
1201227014b3SBenjamin Gaignard 		rk_ops = ops;
1202227014b3SBenjamin Gaignard 
1203227014b3SBenjamin Gaignard 	/*
1204227014b3SBenjamin Gaignard 	 * This should not happen unless different versions of the
1205227014b3SBenjamin Gaignard 	 * hardware block are embedded in the same SoC.
1206227014b3SBenjamin Gaignard 	 */
1207227014b3SBenjamin Gaignard 	if (WARN_ON(rk_ops != ops))
1208227014b3SBenjamin Gaignard 		return -EINVAL;
1209227014b3SBenjamin Gaignard 
1210a86854d0SKees Cook 	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
1211cd6438c5SZhengShunQian 				    GFP_KERNEL);
1212cd6438c5SZhengShunQian 	if (!iommu->bases)
1213cd6438c5SZhengShunQian 		return -ENOMEM;
1214c68a2921SDaniel Kurtz 
12153d08f434SShunqian Zheng 	for (i = 0; i < num_res; i++) {
1216cd6438c5SZhengShunQian 		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
12178d7f2d84STomeu Vizoso 		if (!res)
12188d7f2d84STomeu Vizoso 			continue;
1219cd6438c5SZhengShunQian 		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
1220cd6438c5SZhengShunQian 		if (IS_ERR(iommu->bases[i]))
1221cd6438c5SZhengShunQian 			continue;
1222cd6438c5SZhengShunQian 		iommu->num_mmu++;
1223cd6438c5SZhengShunQian 	}
1224cd6438c5SZhengShunQian 	if (iommu->num_mmu == 0)
1225cd6438c5SZhengShunQian 		return PTR_ERR(iommu->bases[0]);
1226c68a2921SDaniel Kurtz 
1227f9258156SHeiko Stuebner 	iommu->num_irq = platform_irq_count(pdev);
1228f9258156SHeiko Stuebner 	if (iommu->num_irq < 0)
1229f9258156SHeiko Stuebner 		return iommu->num_irq;
1230f9258156SHeiko Stuebner 
1231c3aa4742SSimon Xue 	iommu->reset_disabled = device_property_read_bool(dev,
1232c3aa4742SSimon Xue 					"rockchip,disable-mmu-reset");
1233c68a2921SDaniel Kurtz 
1234f2e3a5f5STomasz Figa 	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
1235f2e3a5f5STomasz Figa 	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
1236f2e3a5f5STomasz Figa 				     sizeof(*iommu->clocks), GFP_KERNEL);
1237f2e3a5f5STomasz Figa 	if (!iommu->clocks)
1238f2e3a5f5STomasz Figa 		return -ENOMEM;
1239f2e3a5f5STomasz Figa 
1240f2e3a5f5STomasz Figa 	for (i = 0; i < iommu->num_clocks; ++i)
1241f2e3a5f5STomasz Figa 		iommu->clocks[i].id = rk_iommu_clocks[i];
1242f2e3a5f5STomasz Figa 
12432f8c7f2eSHeiko Stuebner 	/*
12442f8c7f2eSHeiko Stuebner 	 * iommu clocks should be present for all new devices and devicetrees,
12452f8c7f2eSHeiko Stuebner 	 * but there are older devicetrees without clocks out in the wild.
12462f8c7f2eSHeiko Stuebner 	 * So treat clocks as optional for the time being.
12472f8c7f2eSHeiko Stuebner 	 */
1248f2e3a5f5STomasz Figa 	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
12492f8c7f2eSHeiko Stuebner 	if (err == -ENOENT)
12502f8c7f2eSHeiko Stuebner 		iommu->num_clocks = 0;
12512f8c7f2eSHeiko Stuebner 	else if (err)
1252c9d9f239SJoerg Roedel 		return err;
1253c9d9f239SJoerg Roedel 
1254f2e3a5f5STomasz Figa 	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
1255f2e3a5f5STomasz Figa 	if (err)
1256f2e3a5f5STomasz Figa 		return err;
1257f2e3a5f5STomasz Figa 
1258f2e3a5f5STomasz Figa 	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
1259f2e3a5f5STomasz Figa 	if (err)
1260ef0f48c6SJason Gunthorpe 		goto err_unprepare_clocks;
1261f2e3a5f5STomasz Figa 
12622d471b20SRobin Murphy 	err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
12636d9ffaadSJeffy Chen 	if (err)
1264f2e3a5f5STomasz Figa 		goto err_remove_sysfs;
1265c9d9f239SJoerg Roedel 
12669176a303SJeffy Chen 	/*
12679176a303SJeffy Chen 	 * Use the first registered IOMMU device for the domain to use with the
12689176a303SJeffy Chen 	 * DMA API, since a domain might not physically correspond to a single
12699176a303SJeffy Chen 	 * IOMMU device.
12709176a303SJeffy Chen 	 */
12719176a303SJeffy Chen 	if (!dma_dev)
12729176a303SJeffy Chen 		dma_dev = &pdev->dev;
12739176a303SJeffy Chen 
12740f181d3cSJeffy Chen 	pm_runtime_enable(dev);
12750f181d3cSJeffy Chen 
1276f9258156SHeiko Stuebner 	for (i = 0; i < iommu->num_irq; i++) {
1277f9258156SHeiko Stuebner 		int irq = platform_get_irq(pdev, i);
1278f9258156SHeiko Stuebner 
1279ec014683SChao Wang 		if (irq < 0) {
1280ec014683SChao Wang 			err = irq;
1281ec014683SChao Wang 			goto err_pm_disable;
1282ec014683SChao Wang 		}
12831aa55ca9SMarc Zyngier 
12841aa55ca9SMarc Zyngier 		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
12851aa55ca9SMarc Zyngier 				       IRQF_SHARED, dev_name(dev), iommu);
1286ec014683SChao Wang 		if (err)
1287ec014683SChao Wang 			goto err_pm_disable;
12881aa55ca9SMarc Zyngier 	}
12891aa55ca9SMarc Zyngier 
1290227014b3SBenjamin Gaignard 	dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
1291227014b3SBenjamin Gaignard 
1292f2e3a5f5STomasz Figa 	return 0;
1293ec014683SChao Wang err_pm_disable:
1294ec014683SChao Wang 	pm_runtime_disable(dev);
1295f2e3a5f5STomasz Figa err_remove_sysfs:
1296f2e3a5f5STomasz Figa 	iommu_device_sysfs_remove(&iommu->iommu);
1297f2e3a5f5STomasz Figa err_unprepare_clocks:
1298f2e3a5f5STomasz Figa 	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
1299c9d9f239SJoerg Roedel 	return err;
1300c68a2921SDaniel Kurtz }
1301c68a2921SDaniel Kurtz 
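/*
 * On shutdown, free the fault interrupts first so that no handler can run
 * once pm_runtime_force_suspend() has powered the IOMMU down.
 */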
13021a4e90f2SMarc Zyngier static void rk_iommu_shutdown(struct platform_device *pdev)
13031a4e90f2SMarc Zyngier {
130474bc2abcSHeiko Stuebner 	struct rk_iommu *iommu = platform_get_drvdata(pdev);
1305f9258156SHeiko Stuebner 	int i;
130674bc2abcSHeiko Stuebner 
1307f9258156SHeiko Stuebner 	for (i = 0; i < iommu->num_irq; i++) {
1308f9258156SHeiko Stuebner 		int irq = platform_get_irq(pdev, i);
1309f9258156SHeiko Stuebner 
131074bc2abcSHeiko Stuebner 		devm_free_irq(iommu->dev, irq, iommu);
1311f9258156SHeiko Stuebner 	}
131274bc2abcSHeiko Stuebner 
13130f181d3cSJeffy Chen 	pm_runtime_force_suspend(&pdev->dev);
13140f181d3cSJeffy Chen }
13151a4e90f2SMarc Zyngier 
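/*
 * Runtime PM hooks: nothing needs to be done while the identity domain is
 * attached; otherwise paging is disabled on suspend and re-enabled with the
 * attached domain's page tables on resume.
 */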
13160f181d3cSJeffy Chen static int __maybe_unused rk_iommu_suspend(struct device *dev)
13170f181d3cSJeffy Chen {
13180f181d3cSJeffy Chen 	struct rk_iommu *iommu = dev_get_drvdata(dev);
13190f181d3cSJeffy Chen 
132025c23255SSteven Price 	if (iommu->domain == &rk_identity_domain)
13210f181d3cSJeffy Chen 		return 0;
13220f181d3cSJeffy Chen 
13230f181d3cSJeffy Chen 	rk_iommu_disable(iommu);
13240f181d3cSJeffy Chen 	return 0;
13251a4e90f2SMarc Zyngier }
13260f181d3cSJeffy Chen 
13270f181d3cSJeffy Chen static int __maybe_unused rk_iommu_resume(struct device *dev)
13280f181d3cSJeffy Chen {
13290f181d3cSJeffy Chen 	struct rk_iommu *iommu = dev_get_drvdata(dev);
13300f181d3cSJeffy Chen 
133125c23255SSteven Price 	if (iommu->domain == &rk_identity_domain)
13320f181d3cSJeffy Chen 		return 0;
13330f181d3cSJeffy Chen 
13340f181d3cSJeffy Chen 	return rk_iommu_enable(iommu);
13351a4e90f2SMarc Zyngier }
13361a4e90f2SMarc Zyngier 
13370f181d3cSJeffy Chen static const struct dev_pm_ops rk_iommu_pm_ops = {
13380f181d3cSJeffy Chen 	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
13390f181d3cSJeffy Chen 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
13400f181d3cSJeffy Chen 				pm_runtime_force_resume)
13410f181d3cSJeffy Chen };
13420f181d3cSJeffy Chen 
1343227014b3SBenjamin Gaignard static struct rk_iommu_ops iommu_data_ops_v1 = {
1344227014b3SBenjamin Gaignard 	.pt_address = &rk_dte_pt_address,
1345227014b3SBenjamin Gaignard 	.mk_dtentries = &rk_mk_dte,
1346227014b3SBenjamin Gaignard 	.mk_ptentries = &rk_mk_pte,
1347227014b3SBenjamin Gaignard 	.dma_bit_mask = DMA_BIT_MASK(32),
13482a7e6400SJonas Karlman 	.gfp_flags = GFP_DMA32,
1349227014b3SBenjamin Gaignard };
1350227014b3SBenjamin Gaignard 
1351c55356c5SBenjamin Gaignard static struct rk_iommu_ops iommu_data_ops_v2 = {
1352c55356c5SBenjamin Gaignard 	.pt_address = &rk_dte_pt_address_v2,
1353c55356c5SBenjamin Gaignard 	.mk_dtentries = &rk_mk_dte_v2,
1354c55356c5SBenjamin Gaignard 	.mk_ptentries = &rk_mk_pte_v2,
1355c55356c5SBenjamin Gaignard 	.dma_bit_mask = DMA_BIT_MASK(40),
13562a7e6400SJonas Karlman 	.gfp_flags = 0,
1357c55356c5SBenjamin Gaignard };
1358227014b3SBenjamin Gaignard 
1359c68a2921SDaniel Kurtz static const struct of_device_id rk_iommu_dt_ids[] = {
1360227014b3SBenjamin Gaignard 	{	.compatible = "rockchip,iommu",
1361227014b3SBenjamin Gaignard 		.data = &iommu_data_ops_v1,
1362227014b3SBenjamin Gaignard 	},
1363c55356c5SBenjamin Gaignard 	{	.compatible = "rockchip,rk3568-iommu",
1364c55356c5SBenjamin Gaignard 		.data = &iommu_data_ops_v2,
1365c55356c5SBenjamin Gaignard 	},
1366c68a2921SDaniel Kurtz 	{ /* sentinel */ }
1367c68a2921SDaniel Kurtz };
1368c68a2921SDaniel Kurtz 
1369c68a2921SDaniel Kurtz static struct platform_driver rk_iommu_driver = {
1370c68a2921SDaniel Kurtz 	.probe = rk_iommu_probe,
13711a4e90f2SMarc Zyngier 	.shutdown = rk_iommu_shutdown,
1372c68a2921SDaniel Kurtz 	.driver = {
1373c68a2921SDaniel Kurtz 		   .name = "rk_iommu",
1374d9e7eb15SArnd Bergmann 		   .of_match_table = rk_iommu_dt_ids,
13750f181d3cSJeffy Chen 		   .pm = &rk_iommu_pm_ops,
137698b72b94SJeffy Chen 		   .suppress_bind_attrs = true,
1377c68a2921SDaniel Kurtz 	},
1378c68a2921SDaniel Kurtz };
13796efd3b83SRobin Murphy builtin_platform_driver(rk_iommu_driver);
1380