// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for MTK architected m4u v1 implementations
 *
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
 *
 * Based on drivers/iommu/mtk_iommu.c
 */
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string_choices.h>
#include <asm/barrier.h>
#include <dt-bindings/memory/mtk-memory-port.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>

#if defined(CONFIG_ARM)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...) NULL
#define arm_iommu_attach_device(...)	-ENODEV
struct dma_iommu_mapping {
	struct iommu_domain *domain;
};
#endif

#define REG_MMU_PT_BASE_ADDR			0x000

#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define F_MMU_FAULT_VA_MSK			0xfffff000
#define MTK_PROTECT_PA_ALIGN			128

#define REG_MMU_CTRL_REG			0x210
#define F_MMU_CTRL_COHERENT_EN			BIT(8)
#define REG_MMU_IVRP_PADDR			0x214
#define REG_MMU_INT_CONTROL			0x220
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TABLE_WALK_FAULT			BIT(4)
#define F_INT_TLB_MISS_FAULT			BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW		BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW		BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_FAULT_ST			0x224
#define REG_MMU_FAULT_VA			0x228
#define REG_MMU_INVLD_PA			0x22C
#define REG_MMU_INT_ID				0x388
#define REG_MMU_INVALIDATE			0x5c0
#define REG_MMU_INVLD_START_A			0x5c4
#define REG_MMU_INVLD_END_A			0x5c8

#define REG_MMU_INV_SEL				0x5d8
#define REG_MMU_STANDARD_AXI_MODE		0x5e8

#define REG_MMU_DCM				0x5f0
#define F_MMU_DCM_ON				BIT(1)
#define REG_MMU_CPE_DONE			0x60c
#define F_DESC_VALID				0x2
#define F_DESC_NONSEC				BIT(3)
#define MT2701_M4U_TF_LARB(TF)			(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)			(((TF) >> 8) & 0xF)
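/*
 * Worked example (editor's illustration, not part of the original driver):
 * a REG_MMU_INT_ID value of 0x4300 decodes as
 * larb = 6 - ((0x4300 >> 13) & 0x7) = 4 and port = (0x4300 >> 8) & 0xF = 3,
 * matching how the ISR below derives fault_larb and fault_port.
 */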
/* MTK generation one IOMMU HW only supports 4K page mappings */
#define MT2701_IOMMU_PAGE_SHIFT			12
#define MT2701_IOMMU_PAGE_SIZE			(1UL << MT2701_IOMMU_PAGE_SHIFT)
#define MT2701_LARB_NR_MAX			3

/*
 * The MTK m4u supports a 4GB iova address space and only 4K page
 * mappings, so the pagetable size is exactly 4M.
 */
#define M2701_IOMMU_PGT_SIZE			SZ_4M
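/*
 * Editor's sanity-check sketch (not in the original driver): a 32-bit
 * IOVA space with 4K pages needs (1ULL << 32) >> 12 = 1M entries of
 * sizeof(u32) = 4 bytes each, which is exactly the 4M defined above.
 * static_assert() comes from <linux/build_bug.h>, pulled in here via
 * <linux/bug.h>.
 */
static_assert(M2701_IOMMU_PGT_SIZE ==
	      ((1ULL << 32) >> MT2701_IOMMU_PAGE_SHIFT) * sizeof(u32));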
98 
99 struct mtk_iommu_v1_suspend_reg {
100 	u32			standard_axi_mode;
101 	u32			dcm_dis;
102 	u32			ctrl_reg;
103 	u32			int_control0;
104 };
105 
106 struct mtk_iommu_v1_data {
107 	void __iomem			*base;
108 	int				irq;
109 	struct device			*dev;
110 	struct clk			*bclk;
111 	phys_addr_t			protect_base; /* protect memory base */
112 	struct mtk_iommu_v1_domain	*m4u_dom;
113 
114 	struct iommu_device		iommu;
115 	struct dma_iommu_mapping	*mapping;
116 	struct mtk_smi_larb_iommu	larb_imu[MTK_LARB_NR_MAX];
117 
118 	struct mtk_iommu_v1_suspend_reg	reg;
119 };
120 
121 struct mtk_iommu_v1_domain {
122 	spinlock_t			pgtlock; /* lock for page table */
123 	struct iommu_domain		domain;
124 	u32				*pgt_va;
125 	dma_addr_t			pgt_pa;
126 	struct mtk_iommu_v1_data	*data;
127 };
128 
static int mtk_iommu_v1_bind(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);

	return component_bind_all(dev, &data->larb_imu);
}

static void mtk_iommu_v1_unbind(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);

	component_unbind_all(dev, &data->larb_imu);
}

static struct mtk_iommu_v1_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_v1_domain, domain);
}

static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if (id >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}
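/*
 * Worked example (editor's illustration): master ids are global port
 * indexes, so for id = LARB1_PORT_OFFSET + 3 (assuming that id still
 * falls inside larb 1's port range) the reverse scan in
 * mt2701_m4u_to_larb() returns 1 and mt2701_m4u_to_port() returns 3,
 * i.e. port 3 of larb 1.
 */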

static void mtk_iommu_v1_tlb_flush_all(struct mtk_iommu_v1_data *data)
{
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the TLB flush-all has completed */
}

static void mtk_iommu_v1_tlb_flush_range(struct mtk_iommu_v1_data *data,
					 unsigned long iova, size_t size)
{
	int ret;
	u32 tmp;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		data->base + REG_MMU_INV_SEL);
	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_START_A);
	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
				tmp, tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_v1_tlb_flush_all(data);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

static irqreturn_t mtk_iommu_v1_isr(int irq, void *dev_id)
{
	struct mtk_iommu_v1_data *data = dev_id;
	struct mtk_iommu_v1_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;

	/* Read error information from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = MT2701_M4U_TF_LARB(regval);
	fault_port = MT2701_M4U_TF_PORT(regval);
	/*
	 * The MTK v1 IOMMU HW cannot tell whether a fault was caused by a
	 * read or a write, so report it as a read fault.
	 */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
			int_state, fault_iova, fault_pa,
			fault_larb, fault_port);

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	mtk_iommu_v1_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
				struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu    *larb_mmu;
	unsigned int                 larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
		portid = mt2701_m4u_to_port(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			str_enable_disable(enable), portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
{
	struct mtk_iommu_v1_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
					 &dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

	dom->data = data;

	return 0;
}

static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev)
{
	struct mtk_iommu_v1_domain *dom;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	return &dom->domain;
}

static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_v1_data *data = dom->data;

	dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
			dom->pgt_va, dom->pgt_pa);
	kfree(dom);
}

static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	/* Only allow the domain created internally. */
	mtk_mapping = data->mapping;
	if (mtk_mapping->domain != domain)
		return 0;

	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_v1_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	}

	mtk_iommu_v1_config(data, dev, true);
	return 0;
}

static int mtk_iommu_v1_identity_attach(struct iommu_domain *identity_domain,
					struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);

	mtk_iommu_v1_config(data, dev, false);
	return 0;
}

static struct iommu_domain_ops mtk_iommu_v1_identity_ops = {
	.attach_dev = mtk_iommu_v1_identity_attach,
};

static struct iommu_domain mtk_iommu_v1_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &mtk_iommu_v1_identity_ops,
};

static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < pgcount; i++) {
		if (pgt_base_iova[i])
			break;
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	*mapped = i * MT2701_IOMMU_PAGE_SIZE;
	mtk_iommu_v1_tlb_flush_range(dom->data, iova, *mapped);

	return i == pgcount ? 0 : -EEXIST;
}
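/*
 * Descriptor sketch (editor's reading of the map loop above, not a
 * datasheet excerpt): each 32-bit entry holds the 4K-aligned physical
 * address with F_DESC_VALID (0x2) and F_DESC_NONSEC (BIT(3)) OR-ed into
 * the low bits, so a page at 0x40001000 is stored as 0x4000100a.
 */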

static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t pgsize, size_t pgcount,
				 struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	size_t size = pgcount * MT2701_IOMMU_PAGE_SIZE;

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, pgcount * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);

	return size;
}

static phys_addr_t mtk_iommu_v1_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
	pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}
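/*
 * Worked example (editor's illustration): for iova 0x3000 the lookup
 * above reads dom->pgt_va[0x3000 >> 12] = dom->pgt_va[3] and masks off
 * the low descriptor bits, so an entry of 0x4000100a yields 0x40001000.
 */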

static const struct iommu_ops mtk_iommu_v1_ops;

/*
 * MTK generation one IOMMU HW only supports one iommu domain; all the
 * clients share the same iova address space.
 */
static int mtk_iommu_v1_create_mapping(struct device *dev,
				       const struct of_phandle_args *args)
{
	struct mtk_iommu_v1_data *data;
	struct platform_device *m4updev;
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	ret = iommu_fwspec_init(dev, of_fwnode_handle(args->np));
	if (ret)
		return ret;

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	ret = iommu_fwspec_add_ids(dev, args->args, 1);
	if (ret)
		return ret;

	data = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;
	if (!mtk_mapping) {
		/* The MTK iommu supports a 4GB iova address space. */
		mtk_mapping = arm_iommu_create_mapping(dev, 0, 1ULL << 32);
		if (IS_ERR(mtk_mapping))
			return PTR_ERR(mtk_mapping);

		data->mapping = mtk_mapping;
	}

	return 0;
}

static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = NULL;
	struct of_phandle_args iommu_spec;
	struct mtk_iommu_v1_data *data;
	int err, idx = 0, larbid, larbidx;
	struct device_link *link;
	struct device *larbdev;

	while (!of_parse_phandle_with_args(dev->of_node, "iommus",
					   "#iommu-cells",
					   idx, &iommu_spec)) {

		err = mtk_iommu_v1_create_mapping(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
		if (err)
			return ERR_PTR(err);

		/* dev->iommu_fwspec might have changed */
		fwspec = dev_iommu_fwspec_get(dev);
		idx++;
	}

	if (!fwspec)
		return ERR_PTR(-ENODEV);

	data = dev_iommu_priv_get(dev);

	/* Link the consumer device with the smi-larb device (supplier) */
	larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
	if (larbid >= MT2701_LARB_NR_MAX)
		return ERR_PTR(-EINVAL);

	for (idx = 1; idx < fwspec->num_ids; idx++) {
		larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
		if (larbid != larbidx) {
			dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
				larbid, larbidx);
			return ERR_PTR(-EINVAL);
		}
	}

	larbdev = data->larb_imu[larbid].dev;
	if (!larbdev)
		return ERR_PTR(-EINVAL);

	link = device_link_add(dev, larbdev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	if (!link)
		dev_err(dev, "Unable to link %s\n", dev_name(larbdev));

	return &data->iommu;
}

static void mtk_iommu_v1_probe_finalize(struct device *dev)
{
	struct dma_iommu_mapping *mtk_mapping;
	struct mtk_iommu_v1_data *data;
	int err;

	data        = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;

	err = arm_iommu_attach_device(dev, mtk_mapping);
	if (err)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void mtk_iommu_v1_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_v1_data *data;
	struct device *larbdev;
	unsigned int larbid;

	data = dev_iommu_priv_get(dev);
	larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
	larbdev = data->larb_imu[larbid].dev;
	device_link_remove(dev, larbdev);
}

static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TABLE_WALK_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_PFH_DMA_FIFO_OVERFLOW |
		F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);
	/* Protect memory: HW will write here on translation fault */
	writel_relaxed(data->protect_base,
			data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct iommu_ops mtk_iommu_v1_ops = {
	.identity_domain = &mtk_iommu_v1_identity_domain,
	.domain_alloc_paging = mtk_iommu_v1_domain_alloc_paging,
	.probe_device	= mtk_iommu_v1_probe_device,
	.probe_finalize = mtk_iommu_v1_probe_finalize,
	.release_device	= mtk_iommu_v1_release_device,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= MT2701_IOMMU_PAGE_SIZE,
	.owner          = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= mtk_iommu_v1_attach_device,
		.map_pages	= mtk_iommu_v1_map,
		.unmap_pages	= mtk_iommu_v1_unmap,
		.iova_to_phys	= mtk_iommu_v1_iova_to_phys,
		.free		= mtk_iommu_v1_domain_free,
	}
};

static const struct of_device_id mtk_iommu_v1_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids);

static const struct component_master_ops mtk_iommu_v1_com_ops = {
	.bind		= mtk_iommu_v1_bind,
	.unbind		= mtk_iommu_v1_unbind,
};

static int mtk_iommu_v1_probe(struct platform_device *pdev)
{
	struct device			*dev = &pdev->dev;
	struct mtk_iommu_v1_data	*data;
	struct resource			*res;
	struct component_match		*match = NULL;
	void				*protect;
	int				larb_nr, ret, i;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = dev;

	/* Protect memory. HW will access it on translation fault. */
	protect = devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN,
			       GFP_KERNEL | GFP_DMA);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -ENODEV;
		}
		if (!plarbdev->dev.driver) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[i].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, component_release_of,
					    component_compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_v1_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret)
		goto out_clk_unprepare;

	ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
	if (ret)
		goto out_sysfs_remove;

	ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
	if (ret)
		goto out_dev_unreg;
	return ret;

out_dev_unreg:
	iommu_device_unregister(&data->iommu);
out_sysfs_remove:
	iommu_device_sysfs_remove(&data->iommu);
out_clk_unprepare:
	clk_disable_unprepare(data->bclk);
	return ret;
}

static void mtk_iommu_v1_remove(struct platform_device *pdev)
{
	struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
}

static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
	return 0;
}

static int __maybe_unused mtk_iommu_v1_resume(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
	writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_v1_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume)
};

static struct platform_driver mtk_iommu_v1_driver = {
	.probe	= mtk_iommu_v1_probe,
	.remove = mtk_iommu_v1_remove,
	.driver	= {
		.name = "mtk-iommu-v1",
		.of_match_table = mtk_iommu_v1_of_ids,
		.pm = &mtk_iommu_v1_pm_ops,
	}
};
module_platform_driver(mtk_iommu_v1_driver);

MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
MODULE_LICENSE("GPL v2");