// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
// Copyright (C) 2019-2020, Cerno

#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "iommu-pages.h"

#define IOMMU_RESET_REG			0x010
#define IOMMU_RESET_RELEASE_ALL		0xffffffff
#define IOMMU_ENABLE_REG		0x020
#define IOMMU_ENABLE_ENABLE		BIT(0)

#define IOMMU_BYPASS_REG		0x030
#define IOMMU_AUTO_GATING_REG		0x040
#define IOMMU_AUTO_GATING_ENABLE	BIT(0)

#define IOMMU_WBUF_CTRL_REG		0x044
#define IOMMU_OOO_CTRL_REG		0x048
#define IOMMU_4KB_BDY_PRT_CTRL_REG	0x04c
#define IOMMU_TTB_REG			0x050
#define IOMMU_TLB_ENABLE_REG		0x060
#define IOMMU_TLB_PREFETCH_REG		0x070
#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m)	BIT(m)

#define IOMMU_TLB_FLUSH_REG		0x080
#define IOMMU_TLB_FLUSH_PTW_CACHE	BIT(17)
#define IOMMU_TLB_FLUSH_MACRO_TLB	BIT(16)
/* Only masters 0-5 exist, hence the mask on the micro-TLB index. */
#define IOMMU_TLB_FLUSH_MICRO_TLB(i)	(BIT(i) & GENMASK(5, 0))

#define IOMMU_TLB_IVLD_ADDR_REG		0x090
#define IOMMU_TLB_IVLD_ADDR_MASK_REG	0x094
#define IOMMU_TLB_IVLD_ENABLE_REG	0x098
#define IOMMU_TLB_IVLD_ENABLE_ENABLE	BIT(0)

#define IOMMU_PC_IVLD_ADDR_REG		0x0a0
#define IOMMU_PC_IVLD_ENABLE_REG	0x0a8
#define IOMMU_PC_IVLD_ENABLE_ENABLE	BIT(0)

#define IOMMU_DM_AUT_CTRL_REG(d)	(0x0b0 + ((d) / 2) * 4)
#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m)	(1 << ((((d) & 1) * 16) + ((m) * 2)))
#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m)	(1 << ((((d) & 1) * 16) + ((m) * 2) + 1))
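
/*
 * As encoded by the macros above, each IOMMU_DM_AUT_CTRL register
 * packs two domains: the even domain uses bits 15:0 and the odd one
 * bits 31:16, with two bits per master (read, then write). For
 * instance, revoking write access for master 2 in domain 3 means
 * setting BIT(21) in the register at 0x0b4:
 *
 *	IOMMU_DM_AUT_CTRL_REG(3)		-> 0x0b0 + (3 / 2) * 4 = 0x0b4
 *	IOMMU_DM_AUT_CTRL_WR_UNAVAIL(3, 2)	-> 1 << (16 + 4 + 1) = BIT(21)
 */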

#define IOMMU_DM_AUT_OVWT_REG		0x0d0
#define IOMMU_INT_ENABLE_REG		0x100
#define IOMMU_INT_CLR_REG		0x104
#define IOMMU_INT_STA_REG		0x108
#define IOMMU_INT_ERR_ADDR_REG(i)	(0x110 + (i) * 4)
#define IOMMU_INT_ERR_ADDR_L1_REG	0x130
#define IOMMU_INT_ERR_ADDR_L2_REG	0x134
#define IOMMU_INT_ERR_DATA_REG(i)	(0x150 + (i) * 4)
#define IOMMU_L1PG_INT_REG		0x0180
#define IOMMU_L2PG_INT_REG		0x0184

#define IOMMU_INT_INVALID_L2PG		BIT(17)
#define IOMMU_INT_INVALID_L1PG		BIT(16)
#define IOMMU_INT_MASTER_PERMISSION(m)	BIT(m)
#define IOMMU_INT_MASTER_MASK		(IOMMU_INT_MASTER_PERMISSION(0) | \
					 IOMMU_INT_MASTER_PERMISSION(1) | \
					 IOMMU_INT_MASTER_PERMISSION(2) | \
					 IOMMU_INT_MASTER_PERMISSION(3) | \
					 IOMMU_INT_MASTER_PERMISSION(4) | \
					 IOMMU_INT_MASTER_PERMISSION(5))
#define IOMMU_INT_MASK			(IOMMU_INT_INVALID_L1PG | \
					 IOMMU_INT_INVALID_L2PG | \
					 IOMMU_INT_MASTER_MASK)

#define PT_ENTRY_SIZE			sizeof(u32)

#define NUM_DT_ENTRIES			4096
#define DT_SIZE				(NUM_DT_ENTRIES * PT_ENTRY_SIZE)

#define NUM_PT_ENTRIES			256
#define PT_SIZE				(NUM_PT_ENTRIES * PT_ENTRY_SIZE)

#define SPAGE_SIZE			4096
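
/*
 * With 4096 DTEs, 256 PTEs per page table and 4 kB pages, a single
 * directory table covers 4096 * 256 * 4 kB = 4 GiB of IOVA space,
 * i.e. the full 32-bit input address range.
 */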

struct sun50i_iommu {
	struct iommu_device iommu;

	/* Lock to modify the IOMMU registers */
	spinlock_t iommu_lock;

	struct device *dev;
	void __iomem *base;
	struct reset_control *reset;
	struct clk *clk;

	struct iommu_domain *domain;
	struct kmem_cache *pt_pool;
};

struct sun50i_iommu_domain {
	struct iommu_domain domain;

	/* Number of devices attached to the domain */
	refcount_t refcnt;

	/* L1 Page Table */
	u32 *dt;
	dma_addr_t dt_dma;

	struct sun50i_iommu *iommu;
};

static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct sun50i_iommu_domain, domain);
}

static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
	return readl(iommu->base + offset);
}

static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
	writel(value, iommu->base + offset);
}

/*
 * The Allwinner H6 IOMMU uses a 2-level page table.
 *
 * The first level is the usual Directory Table (DT), which consists
 * of 4096 4-byte Directory Table Entries (DTE), each pointing to a
 * Page Table (PT).
 *
 * Each PT consists of 256 4-byte Page Table Entries (PTE), each
 * pointing to a 4 kB page of physical memory.
 *
 * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
 * register, which holds its physical address.
 */

#define SUN50I_IOVA_DTE_MASK	GENMASK(31, 20)
#define SUN50I_IOVA_PTE_MASK	GENMASK(19, 12)
#define SUN50I_IOVA_PAGE_MASK	GENMASK(11, 0)

static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
}

static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
}

static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
}
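
/*
 * A worked example of the split above: the IOVA 0x12345678 decomposes
 * into DTE index 0x123 (bits 31:20), PTE index 0x45 (bits 19:12) and
 * page offset 0x678 (bits 11:0).
 */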

/*
 * Each Directory Table Entry has a Page Table address and a valid
 * bit:
 *
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:10 - Page Table address
 *   9:2  - Reserved
 *   1:0  - 0b01 when the entry is valid
 */

#define SUN50I_DTE_PT_ADDRESS_MASK	GENMASK(31, 10)
#define SUN50I_DTE_PT_ATTRS		GENMASK(1, 0)
#define SUN50I_DTE_PT_VALID		1

static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
{
	return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
}

static bool sun50i_dte_is_pt_valid(u32 dte)
{
	return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
}

static u32 sun50i_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
}
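
/*
 * For instance (illustrative value), a page table at physical address
 * 0x40001000 gives sun50i_mk_dte(0x40001000) == 0x40001001: the
 * 1 kB-aligned PT address in bits 31:10 plus the valid pattern in
 * bits 1:0.
 */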

/*
 * Each PTE has a Page address, an authority index and a valid bit:
 *
 * +----------------+-----+-----+-----+---+-----+
 * | Page address   | Rsv | ACI | Rsv | V | Rsv |
 * +----------------+-----+-----+-----+---+-----+
 *  31:12 - Page address
 *  11:8  - Reserved
 *   7:4  - Authority Control Index
 *   3:2  - Reserved
 *     1  - 1 if the entry is valid
 *     0  - Reserved
 *
 * The way permissions work is that the IOMMU has 16 "domains" that
 * can be configured to give each master read and/or write permissions
 * through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0 seems to be
 * the default domain, and its permission bits in IOMMU_DM_AUT_CTRL_REG
 * are read-only, so it cannot be used to enforce any particular
 * permission.
 *
 * Each page table entry then carries a reference to the domain it
 * belongs to, so that we can actually enforce permissions on a
 * per-page basis.
 *
 * In order to make it work with the IOMMU framework, we will be using
 * 4 different domains, starting at 1: RD_WR, RD, WR and NONE,
 * depending on the permission we want to enforce. Each domain will
 * have each master set up in the same way, since the IOMMU framework
 * doesn't seem to restrict page access on a per-device basis. And
 * then we will use the relevant domain index when generating the page
 * table entry depending on the permissions we want enforced.
 */

enum sun50i_iommu_aci {
	SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
	SUN50I_IOMMU_ACI_NONE,
	SUN50I_IOMMU_ACI_RD,
	SUN50I_IOMMU_ACI_WR,
	SUN50I_IOMMU_ACI_RD_WR,
};

#define SUN50I_PTE_PAGE_ADDRESS_MASK	GENMASK(31, 12)
#define SUN50I_PTE_ACI_MASK		GENMASK(7, 4)
#define SUN50I_PTE_PAGE_VALID		BIT(1)

static phys_addr_t sun50i_pte_get_page_address(u32 pte)
{
	return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
}

static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
{
	return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
}

static bool sun50i_pte_is_page_valid(u32 pte)
{
	return pte & SUN50I_PTE_PAGE_VALID;
}

static u32 sun50i_mk_pte(phys_addr_t page, int prot)
{
	enum sun50i_iommu_aci aci;
	u32 flags = 0;

	if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
		aci = SUN50I_IOMMU_ACI_RD_WR;
	else if (prot & IOMMU_READ)
		aci = SUN50I_IOMMU_ACI_RD;
	else if (prot & IOMMU_WRITE)
		aci = SUN50I_IOMMU_ACI_WR;
	else
		aci = SUN50I_IOMMU_ACI_NONE;

	flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
	page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
	return page | flags | SUN50I_PTE_PAGE_VALID;
}
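
/*
 * Illustrative example: sun50i_mk_pte(0x40002000, IOMMU_READ | IOMMU_WRITE)
 * picks ACI 4 (SUN50I_IOMMU_ACI_RD_WR), giving
 * 0x40002000 | (4 << 4) | BIT(1) == 0x40002042.
 */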

/*
 * Flush page-table entries to memory. virt_to_phys() stands in for
 * the DMA address here because the driver relies on the two being
 * equal (see the WARN_ON() in sun50i_iommu_alloc_page_table()).
 */
static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
			       void *vaddr, unsigned int count)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	dma_addr_t dma = virt_to_phys(vaddr);
	size_t size = count * PT_ENTRY_SIZE;

	dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}

static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
				  unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
	iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
		    IOMMU_TLB_IVLD_ENABLE_ENABLE);

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB invalidation timed out!\n");
}

static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
				       unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
		    IOMMU_PC_IVLD_ENABLE_ENABLE);

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
}

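/*
 * Zap a range of IOVAs from both TLBs and the page-table walker
 * cache. The extra invalidations one page (resp. 1 MB) beyond each
 * end of the range presumably cover entries the hardware prefetched
 * around it.
 */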
static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
				   unsigned long iova, size_t size)
{
	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);

	sun50i_iommu_zap_iova(iommu, iova);
	sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
	if (size > SPAGE_SIZE) {
		sun50i_iommu_zap_iova(iommu, iova + size);
		sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
	}
	sun50i_iommu_zap_ptw_cache(iommu, iova);
	sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
	if (size > SZ_1M) {
		sun50i_iommu_zap_ptw_cache(iommu, iova + size);
		sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
}

static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
	u32 reg;
	int ret;

	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu,
		    IOMMU_TLB_FLUSH_REG,
		    IOMMU_TLB_FLUSH_PTW_CACHE |
		    IOMMU_TLB_FLUSH_MACRO_TLB |
		    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(0));

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
					reg, !reg,
					1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB Flush timed out!\n");

	return ret;
}

static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	/*
	 * At boot, we'll have a first call into .flush_iotlb_all right after
	 * .probe_device, and since we link our (single) domain to our iommu in
	 * the .attach_device callback, we don't have that pointer set.
	 *
	 * It shouldn't really be any trouble to ignore it though since we flush
	 * all caches as part of the device powerup.
	 */
	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_flush_all_tlb(iommu);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}

static int sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
				       unsigned long iova, size_t size)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_zap_range(iommu, iova, size);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;
}

static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *gather)
{
	sun50i_iommu_flush_iotlb_all(domain);
}

static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
	struct sun50i_iommu_domain *sun50i_domain;
	unsigned long flags;
	int ret;

	if (!iommu->domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);

	ret = reset_control_deassert(iommu->reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(iommu->clk);
	if (ret)
		goto err_reset_assert;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
	iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
	iommu_write(iommu, IOMMU_BYPASS_REG, 0);
	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

	ret = sun50i_iommu_flush_all_tlb(iommu);
	if (ret) {
		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
		goto err_clk_disable;
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
	iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;

err_clk_disable:
	clk_disable_unprepare(iommu->clk);

err_reset_assert:
	reset_control_assert(iommu->reset);

	return ret;
}

static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_ENABLE_REG, 0);
	iommu_write(iommu, IOMMU_TTB_REG, 0);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	clk_disable_unprepare(iommu->clk);
	reset_control_assert(iommu->reset);
}

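/*
 * L2 page tables come from the dedicated pt_pool kmem_cache created
 * in sun50i_iommu_probe(). Since a DTE only stores bits 31:10 of the
 * PT address, page tables must be at least 1 kB aligned; allocating
 * them with PT_SIZE (1 kB) alignment takes care of that, and
 * SLAB_CACHE_DMA32 keeps them below 4 GB as the hardware requires.
 */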
static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
					   gfp_t gfp)
{
	dma_addr_t pt_dma;
	u32 *page_table;

	page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, pt_dma)) {
		dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
		kmem_cache_free(iommu->pt_pool, page_table);
		return ERR_PTR(-ENOMEM);
	}

	/* We rely on the physical address and DMA address being the same */
	WARN_ON(pt_dma != virt_to_phys(page_table));

	return page_table;
}

static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
					 u32 *page_table)
{
	phys_addr_t pt_phys = virt_to_phys(page_table);

	dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
	kmem_cache_free(iommu->pt_pool, page_table);
}

static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
				      dma_addr_t iova, gfp_t gfp)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 *page_table;
	u32 *dte_addr;
	u32 old_dte;
	u32 dte;

	dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	dte = *dte_addr;
	if (sun50i_dte_is_pt_valid(dte)) {
		phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
		return (u32 *)phys_to_virt(pt_phys);
	}

	page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
	if (IS_ERR(page_table))
		return page_table;

	dte = sun50i_mk_dte(virt_to_phys(page_table));
	old_dte = cmpxchg(dte_addr, 0, dte);
	if (old_dte) {
		/*
		 * Somebody else raced us and installed a page table
		 * first: drop ours and use theirs instead.
		 */
		phys_addr_t installed_pt_phys =
			sun50i_dte_get_pt_address(old_dte);
		u32 *installed_pt = phys_to_virt(installed_pt_phys);
		u32 *drop_pt = page_table;

		page_table = installed_pt;
		dte = old_dte;
		sun50i_iommu_free_page_table(iommu, drop_pt);
	}

	sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
	sun50i_table_flush(sun50i_domain, dte_addr, 1);

	return page_table;
}

static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, size_t count,
			    int prot, gfp_t gfp, size_t *mapped)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 pte_index;
	u32 *page_table, *pte_addr;
	int ret = 0;

	/* the IOMMU can only handle 32-bit addresses, both input and output */
	if ((uint64_t)paddr >> 32) {
		ret = -EINVAL;
		dev_warn_once(iommu->dev,
			      "attempt to map address beyond 4GB\n");
		goto out;
	}

	page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
	if (IS_ERR(page_table)) {
		ret = PTR_ERR(page_table);
		goto out;
	}

	pte_index = sun50i_iova_get_pte_index(iova);
	pte_addr = &page_table[pte_index];
	if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
		phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);
		dev_err(iommu->dev,
			"iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
			&iova, &page_phys, &paddr, prot);
		ret = -EBUSY;
		goto out;
	}

	*pte_addr = sun50i_mk_pte(paddr, prot);
	sun50i_table_flush(sun50i_domain, pte_addr, 1);
	*mapped = size;

out:
	return ret;
}

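/*
 * Note that unmapping only clears the PTE: the L2 page tables
 * themselves are kept around, and are only freed once the domain is
 * detached, in sun50i_iommu_detach_domain().
 */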
static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t size, size_t count,
				 struct iommu_iotlb_gather *gather)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *pte_addr;
	u32 dte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);

	if (!sun50i_pte_is_page_valid(*pte_addr))
		return 0;

	memset(pte_addr, 0, sizeof(*pte_addr));
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

	return SZ_4K;
}

static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *page_table;
	u32 dte, pte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[sun50i_iova_get_pte_index(iova)];
	if (!sun50i_pte_is_page_valid(pte))
		return 0;

	return sun50i_pte_get_page_address(pte) +
	       sun50i_iova_get_page_offset(iova);
}
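
/*
 * Tying the earlier examples together: looking up IOVA 0x12345678
 * reads DTE 0x123 of the DT, follows it to the page table, reads
 * PTE 0x45, and (with the illustrative PTE 0x40002042 from above)
 * returns 0x40002000 + 0x678 = 0x40002678.
 */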

static struct iommu_domain *
sun50i_iommu_domain_alloc_paging(struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain;

	sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
	if (!sun50i_domain)
		return NULL;

	sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
					      get_order(DT_SIZE));
	if (!sun50i_domain->dt)
		goto err_free_domain;

	refcount_set(&sun50i_domain->refcnt, 1);

	sun50i_domain->domain.geometry.aperture_start = 0;
	sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	sun50i_domain->domain.geometry.force_aperture = true;

	return &sun50i_domain->domain;

err_free_domain:
	kfree(sun50i_domain);

	return NULL;
}

static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

	iommu_free_pages(sun50i_domain->dt, get_order(DT_SIZE));
	sun50i_domain->dt = NULL;

	kfree(sun50i_domain);
}

static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
				      struct sun50i_iommu_domain *sun50i_domain)
{
	iommu->domain = &sun50i_domain->domain;
	sun50i_domain->iommu = iommu;

	sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
					       DT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
		dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
		return -ENOMEM;
	}

	return sun50i_iommu_enable(iommu);
}

static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
				       struct sun50i_iommu_domain *sun50i_domain)
{
	unsigned int i;

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		phys_addr_t pt_phys;
		u32 *page_table;
		u32 *dte_addr;
		u32 dte;

		dte_addr = &sun50i_domain->dt[i];
		dte = *dte_addr;
		if (!sun50i_dte_is_pt_valid(dte))
			continue;

		memset(dte_addr, 0, sizeof(*dte_addr));
		sun50i_table_flush(sun50i_domain, dte_addr, 1);

		pt_phys = sun50i_dte_get_pt_address(dte);
		page_table = phys_to_virt(pt_phys);
		sun50i_iommu_free_page_table(iommu, page_table);
	}

	sun50i_iommu_disable(iommu);

	dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
			 DT_SIZE, DMA_TO_DEVICE);

	iommu->domain = NULL;
}

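/*
 * "Attaching" the identity domain amounts to detaching from the
 * paging domain: once the last device drops its reference, the
 * domain is torn down and the IOMMU is disabled, leaving the masters'
 * traffic untranslated.
 */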
static int sun50i_iommu_identity_attach(struct iommu_domain *identity_domain,
					struct device *dev)
{
	struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
	struct sun50i_iommu_domain *sun50i_domain;

	dev_dbg(dev, "Detaching from IOMMU domain\n");

	if (iommu->domain == identity_domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);
	if (refcount_dec_and_test(&sun50i_domain->refcnt))
		sun50i_iommu_detach_domain(iommu, sun50i_domain);
	return 0;
}

static struct iommu_domain_ops sun50i_iommu_identity_ops = {
	.attach_dev = sun50i_iommu_identity_attach,
};

static struct iommu_domain sun50i_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &sun50i_iommu_identity_ops,
};

static int sun50i_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return -ENODEV;

	dev_dbg(dev, "Attaching to IOMMU domain\n");

	refcount_inc(&sun50i_domain->refcnt);

	if (iommu->domain == domain)
		return 0;

	sun50i_iommu_identity_attach(&sun50i_iommu_identity_domain, dev);

	sun50i_iommu_attach_domain(iommu, sun50i_domain);

	return 0;
}

static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
{
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static int sun50i_iommu_of_xlate(struct device *dev,
				 const struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	unsigned id = args->args[0];

	dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops sun50i_iommu_ops = {
	.identity_domain = &sun50i_iommu_identity_domain,
	.pgsize_bitmap = SZ_4K,
	.device_group = generic_single_device_group,
	.domain_alloc_paging = sun50i_iommu_domain_alloc_paging,
	.of_xlate = sun50i_iommu_of_xlate,
	.probe_device = sun50i_iommu_probe_device,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = sun50i_iommu_attach_device,
		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
		.iotlb_sync_map = sun50i_iommu_iotlb_sync_map,
		.iotlb_sync = sun50i_iommu_iotlb_sync,
		.iova_to_phys = sun50i_iommu_iova_to_phys,
		.map_pages = sun50i_iommu_map,
		.unmap_pages = sun50i_iommu_unmap,
		.free = sun50i_iommu_domain_free,
	}
};

static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
				      unsigned master, phys_addr_t iova,
				      unsigned prot)
{
	dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
		&iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");

	if (iommu->domain)
		report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
	else
		dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");

	sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
}

static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
					      unsigned addr_reg,
					      unsigned blame_reg)
{
	phys_addr_t iova;
	unsigned master;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	iova = iommu_read(iommu, addr_reg);
	blame = iommu_read(iommu, blame_reg);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);

	return iova;
}

static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
{
	enum sun50i_iommu_aci aci;
	phys_addr_t iova;
	unsigned master;
	unsigned dir;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	blame = iommu_read(iommu, IOMMU_INT_STA_REG);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);
	iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
	aci = sun50i_get_pte_aci(iommu_read(iommu,
					    IOMMU_INT_ERR_DATA_REG(master)));

	switch (aci) {
	/*
	 * If we are in the read-only domain, then it means we
	 * tried to write.
	 */
	case SUN50I_IOMMU_ACI_RD:
		dir = IOMMU_FAULT_WRITE;
		break;

	/*
	 * If we are in the write-only domain, then it means
	 * we tried to read.
	 */
	case SUN50I_IOMMU_ACI_WR:

	/*
	 * If we are in the domain without any permission, we
	 * can't really tell. Let's default to a read
	 * operation.
	 */
	case SUN50I_IOMMU_ACI_NONE:

	/* WTF? */
	case SUN50I_IOMMU_ACI_RD_WR:
	default:
		dir = IOMMU_FAULT_READ;
		break;
	}

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, dir);

	return iova;
}

static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
	u32 status, l1_status, l2_status, resets;
	struct sun50i_iommu *iommu = dev_id;

	spin_lock(&iommu->iommu_lock);

	status = iommu_read(iommu, IOMMU_INT_STA_REG);
	if (!(status & IOMMU_INT_MASK)) {
		spin_unlock(&iommu->iommu_lock);
		return IRQ_NONE;
	}

	l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG);
	l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG);

	if (status & IOMMU_INT_INVALID_L2PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L2_REG,
					   IOMMU_L2PG_INT_REG);
	else if (status & IOMMU_INT_INVALID_L1PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L1_REG,
					   IOMMU_L1PG_INT_REG);
	else
		sun50i_iommu_handle_perm_irq(iommu);

	iommu_write(iommu, IOMMU_INT_CLR_REG, status);

	/*
	 * Writing a zero bit appears to hold the corresponding master
	 * in reset: assert it for the faulting masters, then release
	 * everything.
	 */
	resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK;
	iommu_write(iommu, IOMMU_RESET_REG, ~resets);
	iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL);

	spin_unlock(&iommu->iommu_lock);

	return IRQ_HANDLED;
}

static int sun50i_iommu_probe(struct platform_device *pdev)
{
	struct sun50i_iommu *iommu;
	int ret, irq;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;
	spin_lock_init(&iommu->iommu_lock);
	iommu->domain = &sun50i_iommu_identity_domain;
	platform_set_drvdata(pdev, iommu);
	iommu->dev = &pdev->dev;

	iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
					   PT_SIZE, PT_SIZE,
					   SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA32,
					   NULL);
	if (!iommu->pt_pool)
		return -ENOMEM;

	iommu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iommu->base)) {
		ret = PTR_ERR(iommu->base);
		goto err_free_cache;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_cache;
	}

	iommu->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->clk)) {
		dev_err(&pdev->dev, "Couldn't get our clock.\n");
		ret = PTR_ERR(iommu->clk);
		goto err_free_cache;
	}

	iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->reset)) {
		dev_err(&pdev->dev, "Couldn't get our reset line.\n");
		ret = PTR_ERR(iommu->reset);
		goto err_free_cache;
	}

	ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
				     NULL, dev_name(&pdev->dev));
	if (ret)
		goto err_free_cache;

	ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
	if (ret)
		goto err_remove_sysfs;

	ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
			       dev_name(&pdev->dev), iommu);
	if (ret < 0)
		goto err_unregister;

	return 0;

err_unregister:
	iommu_device_unregister(&iommu->iommu);

err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);

err_free_cache:
	kmem_cache_destroy(iommu->pt_pool);

	return ret;
}

static const struct of_device_id sun50i_iommu_dt[] = {
	{ .compatible = "allwinner,sun50i-h6-iommu", },
	{ .compatible = "allwinner,sun50i-h616-iommu", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);

static struct platform_driver sun50i_iommu_driver = {
	.driver = {
		.name = "sun50i-iommu",
		.of_match_table = sun50i_iommu_dt,
		.suppress_bind_attrs = true,
	}
};
builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);

MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");