1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015-2016 MediaTek Inc.
4  * Author: Yong Wu <yong.wu@mediatek.com>
5  */
6 #include <linux/arm-smccc.h>
7 #include <linux/bitfield.h>
8 #include <linux/bug.h>
9 #include <linux/clk.h>
10 #include <linux/component.h>
11 #include <linux/device.h>
12 #include <linux/err.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/iommu.h>
16 #include <linux/iopoll.h>
17 #include <linux/io-pgtable.h>
18 #include <linux/list.h>
19 #include <linux/mfd/syscon.h>
20 #include <linux/module.h>
21 #include <linux/of_address.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_platform.h>
24 #include <linux/pci.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/regmap.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/soc/mediatek/infracfg.h>
31 #include <linux/soc/mediatek/mtk_sip_svc.h>
32 #include <linux/string_choices.h>
33 #include <asm/barrier.h>
34 #include <soc/mediatek/smi.h>
35 
36 #include <dt-bindings/memory/mtk-memory-port.h>
37 
38 #define REG_MMU_PT_BASE_ADDR			0x000
39 
40 #define REG_MMU_INVALIDATE			0x020
41 #define F_ALL_INVLD				0x2
42 #define F_MMU_INV_RANGE				0x1
43 
44 #define REG_MMU_INVLD_START_A			0x024
45 #define REG_MMU_INVLD_END_A			0x028
46 
47 #define REG_MMU_INV_SEL_GEN2			0x02c
48 #define REG_MMU_INV_SEL_GEN1			0x038
49 #define F_INVLD_EN0				BIT(0)
50 #define F_INVLD_EN1				BIT(1)
51 
52 #define REG_MMU_MISC_CTRL			0x048
53 #define F_MMU_IN_ORDER_WR_EN_MASK		(BIT(1) | BIT(17))
54 #define F_MMU_STANDARD_AXI_MODE_MASK		(BIT(3) | BIT(19))
55 
56 #define REG_MMU_DCM_DIS				0x050
57 #define F_MMU_DCM				BIT(8)
58 
59 #define REG_MMU_WR_LEN_CTRL			0x054
60 #define F_MMU_WR_THROT_DIS_MASK			(BIT(5) | BIT(21))
61 
62 #define REG_MMU_CTRL_REG			0x110
63 #define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
64 #define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
65 #define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)
66 
67 #define REG_MMU_IVRP_PADDR			0x114
68 
69 #define REG_MMU_VLD_PA_RNG			0x118
70 #define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))
71 
72 #define REG_MMU_INT_CONTROL0			0x120
73 #define F_L2_MULIT_HIT_EN			BIT(0)
74 #define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
75 #define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
76 #define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
77 #define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
78 #define F_MISS_FIFO_ERR_INT_EN			BIT(6)
79 #define F_INT_CLR_BIT				BIT(12)
80 
81 #define REG_MMU_INT_MAIN_CONTROL		0x124
82 						/* mmu0 | mmu1 */
83 #define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
84 #define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
85 #define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
86 #define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
87 #define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
88 #define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
89 #define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))
90 
91 #define REG_MMU_CPE_DONE			0x12C
92 
93 #define REG_MMU_FAULT_ST1			0x134
94 #define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
95 #define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)
96 
97 #define REG_MMU0_FAULT_VA			0x13c
98 #define F_MMU_INVAL_VA_31_12_MASK		GENMASK(31, 12)
99 #define F_MMU_INVAL_VA_34_32_MASK		GENMASK(11, 9)
100 #define F_MMU_INVAL_PA_34_32_MASK		GENMASK(8, 6)
101 #define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
102 #define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)
103 
104 #define REG_MMU0_INVLD_PA			0x140
105 #define REG_MMU1_FAULT_VA			0x144
106 #define REG_MMU1_INVLD_PA			0x148
107 #define REG_MMU0_INT_ID				0x150
108 #define REG_MMU1_INT_ID				0x154
109 #define F_MMU_INT_ID_COMM_ID(a)			(((a) >> 9) & 0x7)
110 #define F_MMU_INT_ID_SUB_COMM_ID(a)		(((a) >> 7) & 0x3)
111 #define F_MMU_INT_ID_COMM_ID_EXT(a)		(((a) >> 10) & 0x7)
112 #define F_MMU_INT_ID_SUB_COMM_ID_EXT(a)		(((a) >> 7) & 0x7)
113 /* Macros for the 5-bit port ID field (default) */
114 #define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
115 #define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)
116 /* Macros for the 6-bit port ID field */
117 #define F_MMU_INT_ID_LARB_ID_WID_6(a)		(((a) >> 8) & 0x7)
118 #define F_MMU_INT_ID_PORT_ID_WID_6(a)		(((a) >> 2) & 0x3f)
119 
120 #define MTK_PROTECT_PA_ALIGN			256
121 #define MTK_IOMMU_BANK_SZ			0x1000
122 
123 #define PERICFG_IOMMU_1				0x714
124 
125 #define HAS_4GB_MODE			BIT(0)
126 /* HW will use the EMI clock if there is no "bclk". */
127 #define HAS_BCLK			BIT(1)
128 #define HAS_VLD_PA_RNG			BIT(2)
129 #define RESET_AXI			BIT(3)
130 #define OUT_ORDER_WR_EN			BIT(4)
131 #define HAS_SUB_COMM_2BITS		BIT(5)
132 #define HAS_SUB_COMM_3BITS		BIT(6)
133 #define WR_THROT_EN			BIT(7)
134 #define HAS_LEGACY_IVRP_PADDR		BIT(8)
135 #define IOVA_34_EN			BIT(9)
136 #define SHARE_PGTABLE			BIT(10) /* 2 HW share pgtable */
137 #define DCM_DISABLE			BIT(11)
138 #define STD_AXI_MODE			BIT(12) /* For non MM iommu */
139 /* 2 bits: iommu type */
140 #define MTK_IOMMU_TYPE_MM		(0x0 << 13)
141 #define MTK_IOMMU_TYPE_INFRA		(0x1 << 13)
142 #define MTK_IOMMU_TYPE_MASK		(0x3 << 13)
143 /* PM and clock always on. e.g. infra iommu */
144 #define PM_CLK_AO			BIT(15)
145 #define IFA_IOMMU_PCIE_SUPPORT		BIT(16)
146 #define PGTABLE_PA_35_EN		BIT(17)
147 #define TF_PORT_TO_ADDR_MT8173		BIT(18)
148 #define INT_ID_PORT_WIDTH_6		BIT(19)
149 #define CFG_IFA_MASTER_IN_ATF		BIT(20)
150 
151 #define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask)	\
152 				((((pdata)->flags) & (mask)) == (_x))
153 
154 #define MTK_IOMMU_HAS_FLAG(pdata, _x)	MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, _x)
155 #define MTK_IOMMU_IS_TYPE(pdata, _x)	MTK_IOMMU_HAS_FLAG_MASK(pdata, _x,\
156 							MTK_IOMMU_TYPE_MASK)
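/*
 * For example, an INFRA IOMMU has MTK_IOMMU_TYPE_INFRA (0x1 << 13) in its
 * flags, so:
 *   MTK_IOMMU_IS_TYPE(pdata, MTK_IOMMU_TYPE_INFRA)
 *     -> (flags & (0x3 << 13)) == (0x1 << 13) -> true
 *   MTK_IOMMU_IS_TYPE(pdata, MTK_IOMMU_TYPE_MM)
 *     -> (flags & (0x3 << 13)) == (0x0 << 13) -> false
 * The type check has to compare against the whole 2-bit field: since
 * MTK_IOMMU_TYPE_MM is encoded as 0x0 << 13, a plain
 * MTK_IOMMU_HAS_FLAG(pdata, MTK_IOMMU_TYPE_MM) would reduce to
 * (flags & 0) == 0 and always be true.
 */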
157 
158 #define MTK_INVALID_LARBID		MTK_LARB_NR_MAX
159 
160 #define MTK_LARB_COM_MAX	8
161 #define MTK_LARB_SUBCOM_MAX	8
162 
163 #define MTK_IOMMU_GROUP_MAX	8
164 #define MTK_IOMMU_BANK_MAX	5
165 
166 enum mtk_iommu_plat {
167 	M4U_MT2712,
168 	M4U_MT6779,
169 	M4U_MT6795,
170 	M4U_MT8167,
171 	M4U_MT8173,
172 	M4U_MT8183,
173 	M4U_MT8186,
174 	M4U_MT8188,
175 	M4U_MT8192,
176 	M4U_MT8195,
177 	M4U_MT8365,
178 };
179 
180 struct mtk_iommu_iova_region {
181 	dma_addr_t		iova_base;
182 	unsigned long long	size;
183 };
184 
185 struct mtk_iommu_suspend_reg {
186 	u32			misc_ctrl;
187 	u32			dcm_dis;
188 	u32			ctrl_reg;
189 	u32			vld_pa_rng;
190 	u32			wr_len_ctrl;
191 
192 	u32			int_control[MTK_IOMMU_BANK_MAX];
193 	u32			int_main_control[MTK_IOMMU_BANK_MAX];
194 	u32			ivrp_paddr[MTK_IOMMU_BANK_MAX];
195 };
196 
197 struct mtk_iommu_plat_data {
198 	enum mtk_iommu_plat	m4u_plat;
199 	u32			flags;
200 	u32			inv_sel_reg;
201 
202 	char			*pericfg_comp_str;
203 	struct list_head	*hw_list;
204 
205 	/*
206 	 * The IOMMU HW may support a 16GB IOVA space. In order to balance the IOVA
207 	 * ranges, different masters are put in different IOVA ranges, for example
208 	 * vcodec is in 4G-8G and camera is in 8G-12G. Meanwhile, some masters may
209 	 * have a special IOVA range requirement, e.g. CCU can only support the
210 	 * range 0x40000000-0x44000000.
211 	 * List here the IOVA ranges this SoC supports and which larbs/ports are in
212 	 * which region.
213 	 *
214 	 * The whole 16GB IOVA space uses one pgtable, but each region is an iommu group.
215 	 */
216 	struct {
217 		unsigned int	iova_region_nr;
218 		const struct mtk_iommu_iova_region	*iova_region;
219 		/*
220 		 * Indicate the correspondence between larbs, ports and regions.
221 		 *
222 		 * The index is the same as iova_region and larb port numbers are
223 		 * described as bit positions.
224 		 * For example, storing BIT(0) at index 2,1 means "larb 1, port0 is in region 2".
225 		 *              [2] = { [1] = BIT(0) }
226 		 */
227 		const u32	(*iova_region_larb_msk)[MTK_LARB_NR_MAX];
228 	};
229 
230 	/*
231 	 * The IOMMU HW may have 5 banks. Each bank has an independent pgtable.
232 	 * Here list how many banks this SoC supports/enables and which ports are in which bank.
233 	 */
234 	struct {
235 		u8		banks_num;
236 		bool		banks_enable[MTK_IOMMU_BANK_MAX];
237 		unsigned int	banks_portmsk[MTK_IOMMU_BANK_MAX];
238 	};
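	/*
	 * Illustrative (hypothetical values): on an SoC with banks_num = 5,
	 * banks_enable = {true, false, false, false, true} and
	 * banks_portmsk[4] = BIT(0), a master whose fwspec ports contain port0
	 * would be routed to bank 4 by mtk_iommu_get_bank_id(), while all other
	 * masters fall back to bank 0.
	 */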
239 
240 	unsigned char       larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
241 };
242 
243 struct mtk_iommu_bank_data {
244 	void __iomem			*base;
245 	int				irq;
246 	u8				id;
247 	struct device			*parent_dev;
248 	struct mtk_iommu_data		*parent_data;
249 	spinlock_t			tlb_lock; /* lock for tlb range flush */
250 	struct mtk_iommu_domain		*m4u_dom; /* Each bank has a domain */
251 };
252 
253 struct mtk_iommu_data {
254 	struct device			*dev;
255 	struct clk			*bclk;
256 	phys_addr_t			protect_base; /* protect memory base */
257 	struct mtk_iommu_suspend_reg	reg;
258 	struct iommu_group		*m4u_group[MTK_IOMMU_GROUP_MAX];
259 	bool                            enable_4GB;
260 
261 	struct iommu_device		iommu;
262 	const struct mtk_iommu_plat_data *plat_data;
263 	struct device			*smicomm_dev;
264 
265 	struct mtk_iommu_bank_data	*bank;
266 	struct mtk_iommu_domain		*share_dom;
267 
268 	struct regmap			*pericfg;
269 	struct mutex			mutex; /* Protect m4u_group/m4u_dom above */
270 
271 	/*
272 	 * In the sharing pgtable case, add data->list to the global list (m4ulist).
273 	 * In the non-sharing pgtable case, add data->list to its own hw_list_head.
274 	 */
275 	struct list_head		*hw_list;
276 	struct list_head		hw_list_head;
277 	struct list_head		list;
278 	struct mtk_smi_larb_iommu	larb_imu[MTK_LARB_NR_MAX];
279 };
280 
281 struct mtk_iommu_domain {
282 	struct io_pgtable_cfg		cfg;
283 	struct io_pgtable_ops		*iop;
284 
285 	struct mtk_iommu_bank_data	*bank;
286 	struct iommu_domain		domain;
287 
288 	struct mutex			mutex; /* Protect "data" in this structure */
289 };
290 
291 static int mtk_iommu_bind(struct device *dev)
292 {
293 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
294 
295 	return component_bind_all(dev, &data->larb_imu);
296 }
297 
298 static void mtk_iommu_unbind(struct device *dev)
299 {
300 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
301 
302 	component_unbind_all(dev, &data->larb_imu);
303 }
304 
305 static const struct iommu_ops mtk_iommu_ops;
306 
307 static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid);
308 
309 #define MTK_IOMMU_TLB_ADDR(iova) ({					\
310 	dma_addr_t _addr = iova;					\
311 	((lower_32_bits(_addr) & GENMASK(31, 12)) | upper_32_bits(_addr));\
312 })
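/*
 * For example, for a 34-bit IOVA such as 0x2_4000_1000 the macro folds the
 * high bits into the low 12 bits of the page-aligned address:
 *   (0x40001000 & GENMASK(31, 12)) | 0x2 == 0x40001002
 * This is the value written to REG_MMU_INVLD_START_A/REG_MMU_INVLD_END_A in
 * mtk_iommu_tlb_flush_range_sync() below.
 */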
313 
314 /*
315  * In M4U 4GB mode, the physical address is remapped as below:
316  *
317  * CPU Physical address:
318  * ====================
319  *
320  * 0      1G       2G     3G       4G     5G
321  * |---A---|---B---|---C---|---D---|---E---|
322  * +--I/O--+------------Memory-------------+
323  *
324  * IOMMU output physical address:
325  *  =============================
326  *
327  *                                 4G      5G     6G      7G      8G
328  *                                 |---E---|---B---|---C---|---D---|
329  *                                 +------------Memory-------------+
330  *
331  * Region 'A' (I/O) can NOT be mapped by the M4U. For regions 'B'/'C'/'D',
332  * bit32 of the CPU physical address always needs to be set, while for region
333  * 'E' the CPU physical address is kept as is.
334  * Additionally, the iommu consumers always use the CPU physical address.
335  */
336 #define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL
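/*
 * Worked example of the remap above: mtk_iommu_map() sets bit32, so a CPU PA
 * of 0x4000_0000 (region 'B') is stored as 0x1_4000_0000 in the pgtable;
 * because that is >= MTK_IOMMU_4GB_MODE_REMAP_BASE, mtk_iommu_iova_to_phys()
 * clears bit32 again and returns the CPU PA 0x4000_0000. A region 'E' PA such
 * as 0x1_2000_0000 already has bit32 set and stays below the remap base, so
 * it is returned unchanged.
 */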
337 
338 static LIST_HEAD(m4ulist);	/* List all the M4U HWs */
339 
340 #define for_each_m4u(data, head)  list_for_each_entry(data, head, list)
341 
342 #define MTK_IOMMU_IOVA_SZ_4G		(SZ_4G - SZ_8M) /* 8M as gap */
343 
344 static const struct mtk_iommu_iova_region single_domain[] = {
345 	{.iova_base = 0,		.size = MTK_IOMMU_IOVA_SZ_4G},
346 };
347 
348 #define MT8192_MULTI_REGION_NR_MAX	6
349 
350 #define MT8192_MULTI_REGION_NR	(IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) ? \
351 				 MT8192_MULTI_REGION_NR_MAX : 1)
352 
353 static const struct mtk_iommu_iova_region mt8192_multi_dom[MT8192_MULTI_REGION_NR] = {
354 	{ .iova_base = 0x0,		.size = MTK_IOMMU_IOVA_SZ_4G},	/* 0 ~ 4G,  */
355 	#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
356 	{ .iova_base = SZ_4G,		.size = MTK_IOMMU_IOVA_SZ_4G},	/* 4G ~ 8G */
357 	{ .iova_base = SZ_4G * 2,	.size = MTK_IOMMU_IOVA_SZ_4G},	/* 8G ~ 12G */
358 	{ .iova_base = SZ_4G * 3,	.size = MTK_IOMMU_IOVA_SZ_4G},	/* 12G ~ 16G */
359 
360 	{ .iova_base = 0x240000000ULL,	.size = 0x4000000},	/* CCU0 */
361 	{ .iova_base = 0x244000000ULL,	.size = 0x4000000},	/* CCU1 */
362 	#endif
363 };
364 
365 /* If 2 M4Us share a domain (i.e. use the same hwlist), put the corresponding info in the first data. */
366 static struct mtk_iommu_data *mtk_iommu_get_frst_data(struct list_head *hwlist)
367 {
368 	return list_first_entry(hwlist, struct mtk_iommu_data, list);
369 }
370 
371 static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
372 {
373 	return container_of(dom, struct mtk_iommu_domain, domain);
374 }
375 
376 static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
377 {
378 	/* The tlb flush-all is always performed via bank0. */
379 	struct mtk_iommu_bank_data *bank = &data->bank[0];
380 	void __iomem *base = bank->base;
381 	unsigned long flags;
382 
383 	spin_lock_irqsave(&bank->tlb_lock, flags);
384 	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, base + data->plat_data->inv_sel_reg);
385 	writel_relaxed(F_ALL_INVLD, base + REG_MMU_INVALIDATE);
386 	wmb(); /* Make sure the tlb flush all done */
387 	spin_unlock_irqrestore(&bank->tlb_lock, flags);
388 }
389 
390 static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
391 					   struct mtk_iommu_bank_data *bank)
392 {
393 	struct list_head *head = bank->parent_data->hw_list;
394 	struct mtk_iommu_bank_data *curbank;
395 	struct mtk_iommu_data *data;
396 	bool check_pm_status;
397 	unsigned long flags;
398 	void __iomem *base;
399 	int ret;
400 	u32 tmp;
401 
402 	for_each_m4u(data, head) {
403 		/*
404 		 * To avoid resuming the iommu device frequently when it is not
405 		 * active, don't always call pm_runtime_get here; the tlb flush is
406 		 * then covered by the tlb flush all in the runtime resume.
407 		 *
408 		 * There are 2 special cases:
409 		 *
410 		 * Case1: The iommu dev doesn't have a power domain but has bclk,
411 		 * like mt8173. This case should also skip the tlb flush while the
412 		 * dev is not active, to mute the tlb timeout log.
413 		 *
414 		 * Case2: The power/clock of the infra iommu is always on, and it
415 		 * doesn't have a device link with the master devices. This case
416 		 * should skip the PM status check.
417 		 */
418 		check_pm_status = !MTK_IOMMU_HAS_FLAG(data->plat_data, PM_CLK_AO);
419 
420 		if (check_pm_status) {
421 			if (pm_runtime_get_if_in_use(data->dev) <= 0)
422 				continue;
423 		}
424 
425 		curbank = &data->bank[bank->id];
426 		base = curbank->base;
427 
428 		spin_lock_irqsave(&curbank->tlb_lock, flags);
429 		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
430 			       base + data->plat_data->inv_sel_reg);
431 
432 		writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A);
433 		writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
434 			       base + REG_MMU_INVLD_END_A);
435 		writel_relaxed(F_MMU_INV_RANGE, base + REG_MMU_INVALIDATE);
436 
437 		/* tlb sync */
438 		ret = readl_poll_timeout_atomic(base + REG_MMU_CPE_DONE,
439 						tmp, tmp != 0, 10, 1000);
440 
441 		/* Clear the CPE status */
442 		writel_relaxed(0, base + REG_MMU_CPE_DONE);
443 		spin_unlock_irqrestore(&curbank->tlb_lock, flags);
444 
445 		if (ret) {
446 			dev_warn(data->dev,
447 				 "Partial TLB flush timed out, falling back to full flush\n");
448 			mtk_iommu_tlb_flush_all(data);
449 		}
450 
451 		if (check_pm_status)
452 			pm_runtime_put(data->dev);
453 	}
454 }
455 
456 static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
457 {
458 	struct mtk_iommu_bank_data *bank = dev_id;
459 	struct mtk_iommu_data *data = bank->parent_data;
460 	struct mtk_iommu_domain *dom = bank->m4u_dom;
461 	unsigned int fault_larb = MTK_INVALID_LARBID, fault_port = 0, sub_comm = 0;
462 	u32 int_state, regval, va34_32, pa34_32;
463 	const struct mtk_iommu_plat_data *plat_data = data->plat_data;
464 	void __iomem *base = bank->base;
465 	u64 fault_iova, fault_pa;
466 	bool layer, write;
467 
468 	/* Read error info from registers */
469 	int_state = readl_relaxed(base + REG_MMU_FAULT_ST1);
470 	if (int_state & F_REG_MMU0_FAULT_MASK) {
471 		regval = readl_relaxed(base + REG_MMU0_INT_ID);
472 		fault_iova = readl_relaxed(base + REG_MMU0_FAULT_VA);
473 		fault_pa = readl_relaxed(base + REG_MMU0_INVLD_PA);
474 	} else {
475 		regval = readl_relaxed(base + REG_MMU1_INT_ID);
476 		fault_iova = readl_relaxed(base + REG_MMU1_FAULT_VA);
477 		fault_pa = readl_relaxed(base + REG_MMU1_INVLD_PA);
478 	}
479 	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
480 	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
481 	if (MTK_IOMMU_HAS_FLAG(plat_data, IOVA_34_EN)) {
482 		va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
483 		fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
484 		fault_iova |= (u64)va34_32 << 32;
485 	}
486 	pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
487 	fault_pa |= (u64)pa34_32 << 32;
488 
489 	if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) {
490 		if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) {
491 			fault_larb = F_MMU_INT_ID_COMM_ID(regval);
492 			sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
493 			fault_port = F_MMU_INT_ID_PORT_ID(regval);
494 		} else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) {
495 			fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval);
496 			sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval);
497 			fault_port = F_MMU_INT_ID_PORT_ID(regval);
498 		} else if (MTK_IOMMU_HAS_FLAG(plat_data, INT_ID_PORT_WIDTH_6)) {
499 			fault_port = F_MMU_INT_ID_PORT_ID_WID_6(regval);
500 			fault_larb = F_MMU_INT_ID_LARB_ID_WID_6(regval);
501 		} else {
502 			fault_port = F_MMU_INT_ID_PORT_ID(regval);
503 			fault_larb = F_MMU_INT_ID_LARB_ID(regval);
504 		}
505 		fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
506 	}
507 
508 	if (!dom || report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
509 			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
510 		dev_err_ratelimited(
511 			bank->parent_dev,
512 			"fault type=0x%x iova=0x%llx pa=0x%llx master=0x%x(larb=%d port=%d) layer=%d %s\n",
513 			int_state, fault_iova, fault_pa, regval, fault_larb, fault_port,
514 			layer, str_write_read(write));
515 	}
516 
517 	/* Interrupt clear */
518 	regval = readl_relaxed(base + REG_MMU_INT_CONTROL0);
519 	regval |= F_INT_CLR_BIT;
520 	writel_relaxed(regval, base + REG_MMU_INT_CONTROL0);
521 
522 	mtk_iommu_tlb_flush_all(data);
523 
524 	return IRQ_HANDLED;
525 }
526 
527 static unsigned int mtk_iommu_get_bank_id(struct device *dev,
528 					  const struct mtk_iommu_plat_data *plat_data)
529 {
530 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
531 	unsigned int i, portmsk = 0, bankid = 0;
532 
533 	if (plat_data->banks_num == 1)
534 		return bankid;
535 
536 	for (i = 0; i < fwspec->num_ids; i++)
537 		portmsk |= BIT(MTK_M4U_TO_PORT(fwspec->ids[i]));
538 
539 	for (i = 0; i < plat_data->banks_num && i < MTK_IOMMU_BANK_MAX; i++) {
540 		if (!plat_data->banks_enable[i])
541 			continue;
542 
543 		if (portmsk & plat_data->banks_portmsk[i]) {
544 			bankid = i;
545 			break;
546 		}
547 	}
548 	return bankid; /* default is 0 */
549 }
550 
551 static int mtk_iommu_get_iova_region_id(struct device *dev,
552 					const struct mtk_iommu_plat_data *plat_data)
553 {
554 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
555 	unsigned int portidmsk = 0, larbid;
556 	const u32 *rgn_larb_msk;
557 	int i;
558 
559 	if (plat_data->iova_region_nr == 1)
560 		return 0;
561 
562 	larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
563 	for (i = 0; i < fwspec->num_ids; i++)
564 		portidmsk |= BIT(MTK_M4U_TO_PORT(fwspec->ids[i]));
565 
566 	for (i = 0; i < plat_data->iova_region_nr; i++) {
567 		rgn_larb_msk = plat_data->iova_region_larb_msk[i];
568 		if (!rgn_larb_msk)
569 			continue;
570 
571 		if ((rgn_larb_msk[larbid] & portidmsk) == portidmsk)
572 			return i;
573 	}
574 
575 	dev_err(dev, "Can NOT find the region for larb(%d-%x).\n",
576 		larbid, portidmsk);
577 	return -EINVAL;
578 }
579 
580 static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
581 			    bool enable, unsigned int regionid)
582 {
583 	struct mtk_smi_larb_iommu    *larb_mmu;
584 	unsigned int                 larbid, portid;
585 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
586 	const struct mtk_iommu_iova_region *region;
587 	unsigned long portid_msk = 0;
588 	struct arm_smccc_res res;
589 	int i, ret = 0;
590 
591 	for (i = 0; i < fwspec->num_ids; ++i) {
592 		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
593 		portid_msk |= BIT(portid);
594 	}
595 
596 	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
597 		/* All ports should be in the same larb. Just use index 0 here. */
598 		larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
599 		larb_mmu = &data->larb_imu[larbid];
600 		region = data->plat_data->iova_region + regionid;
601 
602 		for_each_set_bit(portid, &portid_msk, 32)
603 			larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
604 
605 		dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n",
606 			str_enable_disable(enable), dev_name(larb_mmu->dev),
607 			portid_msk, regionid, upper_32_bits(region->iova_base));
608 
609 		if (enable)
610 			larb_mmu->mmu |= portid_msk;
611 		else
612 			larb_mmu->mmu &= ~portid_msk;
613 	} else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
614 		if (MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) {
615 			arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL,
616 				      IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU,
617 				      portid_msk, enable, 0, 0, 0, 0, &res);
618 			ret = res.a0;
619 		} else {
620 			/* PCI dev has only one output id, enable the next writing bit for PCIe */
621 			if (dev_is_pci(dev)) {
622 				if (fwspec->num_ids != 1) {
623 					dev_err(dev, "PCI dev can only have one port.\n");
624 					return -ENODEV;
625 				}
626 				portid_msk |= BIT(portid + 1);
627 			}
628 
629 			ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1,
630 						 (u32)portid_msk, enable ? (u32)portid_msk : 0);
631 		}
632 		if (ret)
633 			dev_err(dev, "%s iommu(%s) inframaster 0x%lx fail(%d).\n",
634 				str_enable_disable(enable), dev_name(data->dev),
635 				portid_msk, ret);
636 	}
637 	return ret;
638 }
639 
640 static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
641 				     struct mtk_iommu_data *data,
642 				     unsigned int region_id)
643 {
644 	struct mtk_iommu_domain	*share_dom = data->share_dom;
645 	const struct mtk_iommu_iova_region *region;
646 
647 	/* Share the pgtable when 2 MM IOMMUs share it or when one IOMMU uses multiple iova ranges */
648 	if (share_dom) {
649 		dom->iop = share_dom->iop;
650 		dom->cfg = share_dom->cfg;
651 		dom->domain.pgsize_bitmap = share_dom->domain.pgsize_bitmap;
652 		goto update_iova_region;
653 	}
654 
655 	dom->cfg = (struct io_pgtable_cfg) {
656 		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
657 			IO_PGTABLE_QUIRK_NO_PERMS |
658 			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
659 		.pgsize_bitmap = dom->domain.pgsize_bitmap,
660 		.ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32,
661 		.iommu_dev = data->dev,
662 	};
663 
664 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN))
665 		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT;
666 
667 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
668 		dom->cfg.oas = data->enable_4GB ? 33 : 32;
669 	else
670 		dom->cfg.oas = 35;
671 
672 	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
673 	if (!dom->iop) {
674 		dev_err(data->dev, "Failed to alloc io pgtable\n");
675 		return -ENOMEM;
676 	}
677 
678 	data->share_dom = dom;
679 
680 update_iova_region:
681 	/* Update the iova region for this domain */
682 	region = data->plat_data->iova_region + region_id;
683 	dom->domain.geometry.aperture_start = region->iova_base;
684 	dom->domain.geometry.aperture_end = region->iova_base + region->size - 1;
685 	dom->domain.geometry.force_aperture = true;
686 	return 0;
687 }
688 
689 static struct iommu_domain *mtk_iommu_domain_alloc_paging(struct device *dev)
690 {
691 	struct mtk_iommu_domain *dom;
692 
693 	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
694 	if (!dom)
695 		return NULL;
696 	mutex_init(&dom->mutex);
697 	dom->domain.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M;
698 
699 	return &dom->domain;
700 }
701 
702 static void mtk_iommu_domain_free(struct iommu_domain *domain)
703 {
704 	kfree(to_mtk_domain(domain));
705 }
706 
707 static int mtk_iommu_attach_device(struct iommu_domain *domain,
708 				   struct device *dev)
709 {
710 	struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata;
711 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
712 	struct list_head *hw_list = data->hw_list;
713 	struct device *m4udev = data->dev;
714 	struct mtk_iommu_bank_data *bank;
715 	unsigned int bankid;
716 	int ret, region_id;
717 
718 	region_id = mtk_iommu_get_iova_region_id(dev, data->plat_data);
719 	if (region_id < 0)
720 		return region_id;
721 
722 	bankid = mtk_iommu_get_bank_id(dev, data->plat_data);
723 	mutex_lock(&dom->mutex);
724 	if (!dom->bank) {
725 		/* In the sharing pgtable case, the data is in frstdata. */
726 		frstdata = mtk_iommu_get_frst_data(hw_list);
727 
728 		mutex_lock(&frstdata->mutex);
729 		ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
730 		mutex_unlock(&frstdata->mutex);
731 		if (ret) {
732 			mutex_unlock(&dom->mutex);
733 			return ret;
734 		}
735 		dom->bank = &data->bank[bankid];
736 	}
737 	mutex_unlock(&dom->mutex);
738 
739 	mutex_lock(&data->mutex);
740 	bank = &data->bank[bankid];
741 	if (!bank->m4u_dom) { /* Initialize the M4U HW for each BANK */
742 		ret = pm_runtime_resume_and_get(m4udev);
743 		if (ret < 0) {
744 			dev_err(m4udev, "pm get fail(%d) in attach.\n", ret);
745 			goto err_unlock;
746 		}
747 
748 		ret = mtk_iommu_hw_init(data, bankid);
749 		if (ret) {
750 			pm_runtime_put(m4udev);
751 			goto err_unlock;
752 		}
753 		bank->m4u_dom = dom;
754 		writel(dom->cfg.arm_v7s_cfg.ttbr, bank->base + REG_MMU_PT_BASE_ADDR);
755 
756 		pm_runtime_put(m4udev);
757 	}
758 	mutex_unlock(&data->mutex);
759 
760 	if (region_id > 0) {
761 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(34));
762 		if (ret) {
763 			dev_err(m4udev, "Failed to set dma_mask for %s(%d).\n", dev_name(dev), ret);
764 			return ret;
765 		}
766 	}
767 
768 	return mtk_iommu_config(data, dev, true, region_id);
769 
770 err_unlock:
771 	mutex_unlock(&data->mutex);
772 	return ret;
773 }
774 
775 static int mtk_iommu_identity_attach(struct iommu_domain *identity_domain,
776 				     struct device *dev)
777 {
778 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
779 	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
780 
781 	if (domain == identity_domain || !domain)
782 		return 0;
783 
784 	mtk_iommu_config(data, dev, false, 0);
785 	return 0;
786 }
787 
788 static struct iommu_domain_ops mtk_iommu_identity_ops = {
789 	.attach_dev = mtk_iommu_identity_attach,
790 };
791 
792 static struct iommu_domain mtk_iommu_identity_domain = {
793 	.type = IOMMU_DOMAIN_IDENTITY,
794 	.ops = &mtk_iommu_identity_ops,
795 };
796 
797 static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
798 			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
799 			 int prot, gfp_t gfp, size_t *mapped)
800 {
801 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
802 
803 	/* The "4GB mode" M4U physically cannot use the lower remap of DRAM. */
804 	if (dom->bank->parent_data->enable_4GB)
805 		paddr |= BIT_ULL(32);
806 
807 	/* Synchronize with the tlb_lock */
808 	return dom->iop->map_pages(dom->iop, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
809 }
810 
811 static size_t mtk_iommu_unmap(struct iommu_domain *domain,
812 			      unsigned long iova, size_t pgsize, size_t pgcount,
813 			      struct iommu_iotlb_gather *gather)
814 {
815 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
816 
817 	iommu_iotlb_gather_add_range(gather, iova, pgsize * pgcount);
818 	return dom->iop->unmap_pages(dom->iop, iova, pgsize, pgcount, gather);
819 }
820 
821 static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
822 {
823 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
824 
825 	if (dom->bank)
826 		mtk_iommu_tlb_flush_all(dom->bank->parent_data);
827 }
828 
829 static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
830 				 struct iommu_iotlb_gather *gather)
831 {
832 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
833 	size_t length = gather->end - gather->start + 1;
834 
835 	mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
836 }
837 
838 static int mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
839 			      size_t size)
840 {
841 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
842 
843 	mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
844 	return 0;
845 }
846 
847 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
848 					  dma_addr_t iova)
849 {
850 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
851 	phys_addr_t pa;
852 
853 	pa = dom->iop->iova_to_phys(dom->iop, iova);
854 	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
855 	    dom->bank->parent_data->enable_4GB &&
856 	    pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
857 		pa &= ~BIT_ULL(32);
858 
859 	return pa;
860 }
861 
862 static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
863 {
864 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
865 	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
866 	struct device_link *link;
867 	struct device *larbdev;
868 	unsigned int larbid, larbidx, i;
869 
870 	if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
871 		return &data->iommu;
872 
873 	/*
874 	 * Link the consumer device with the smi-larb device (supplier).
875 	 * The device that connects to each larb is an independent HW.
876 	 * All the ports of one device should be in the same larb.
877 	 */
878 	larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
879 	if (larbid >= MTK_LARB_NR_MAX)
880 		return ERR_PTR(-EINVAL);
881 
882 	for (i = 1; i < fwspec->num_ids; i++) {
883 		larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
884 		if (larbid != larbidx) {
885 			dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
886 				larbid, larbidx);
887 			return ERR_PTR(-EINVAL);
888 		}
889 	}
890 	larbdev = data->larb_imu[larbid].dev;
891 	if (!larbdev)
892 		return ERR_PTR(-EINVAL);
893 
894 	link = device_link_add(dev, larbdev,
895 			       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
896 	if (!link)
897 		dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
898 	return &data->iommu;
899 }
900 
901 static void mtk_iommu_release_device(struct device *dev)
902 {
903 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
904 	struct mtk_iommu_data *data;
905 	struct device *larbdev;
906 	unsigned int larbid;
907 
908 	data = dev_iommu_priv_get(dev);
909 	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
910 		larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
911 		larbdev = data->larb_imu[larbid].dev;
912 		device_link_remove(dev, larbdev);
913 	}
914 }
915 
916 static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data)
917 {
918 	unsigned int bankid;
919 
920 	/*
921 	 * If the bank function is enabled, each bank is an iommu group/domain.
922 	 * Otherwise, each iova region is an iommu group/domain.
923 	 */
924 	bankid = mtk_iommu_get_bank_id(dev, plat_data);
925 	if (bankid)
926 		return bankid;
927 
928 	return mtk_iommu_get_iova_region_id(dev, plat_data);
929 }
930 
931 static struct iommu_group *mtk_iommu_device_group(struct device *dev)
932 {
933 	struct mtk_iommu_data *c_data = dev_iommu_priv_get(dev), *data;
934 	struct list_head *hw_list = c_data->hw_list;
935 	struct iommu_group *group;
936 	int groupid;
937 
938 	data = mtk_iommu_get_frst_data(hw_list);
939 	if (!data)
940 		return ERR_PTR(-ENODEV);
941 
942 	groupid = mtk_iommu_get_group_id(dev, data->plat_data);
943 	if (groupid < 0)
944 		return ERR_PTR(groupid);
945 
946 	mutex_lock(&data->mutex);
947 	group = data->m4u_group[groupid];
948 	if (!group) {
949 		group = iommu_group_alloc();
950 		if (!IS_ERR(group))
951 			data->m4u_group[groupid] = group;
952 	} else {
953 		iommu_group_ref_get(group);
954 	}
955 	mutex_unlock(&data->mutex);
956 	return group;
957 }
958 
959 static int mtk_iommu_of_xlate(struct device *dev,
960 			      const struct of_phandle_args *args)
961 {
962 	struct platform_device *m4updev;
963 
964 	if (args->args_count != 1) {
965 		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
966 			args->args_count);
967 		return -EINVAL;
968 	}
969 
970 	if (!dev_iommu_priv_get(dev)) {
971 		/* Get the m4u device */
972 		m4updev = of_find_device_by_node(args->np);
973 		if (WARN_ON(!m4updev))
974 			return -EINVAL;
975 
976 		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
977 	}
978 
979 	return iommu_fwspec_add_ids(dev, args->args, 1);
980 }
981 
982 static void mtk_iommu_get_resv_regions(struct device *dev,
983 				       struct list_head *head)
984 {
985 	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
986 	unsigned int regionid = mtk_iommu_get_iova_region_id(dev, data->plat_data), i;
987 	const struct mtk_iommu_iova_region *resv, *curdom;
988 	struct iommu_resv_region *region;
989 	int prot = IOMMU_WRITE | IOMMU_READ;
990 
991 	if ((int)regionid < 0)
992 		return;
993 	curdom = data->plat_data->iova_region + regionid;
994 	for (i = 0; i < data->plat_data->iova_region_nr; i++) {
995 		resv = data->plat_data->iova_region + i;
996 
997 		/* Only reserve when the region is inside the current domain */
998 		if (resv->iova_base <= curdom->iova_base ||
999 		    resv->iova_base + resv->size >= curdom->iova_base + curdom->size)
1000 			continue;
1001 
1002 		region = iommu_alloc_resv_region(resv->iova_base, resv->size,
1003 						 prot, IOMMU_RESV_RESERVED,
1004 						 GFP_KERNEL);
1005 		if (!region)
1006 			return;
1007 
1008 		list_add_tail(&region->list, head);
1009 	}
1010 }
1011 
1012 static const struct iommu_ops mtk_iommu_ops = {
1013 	.identity_domain = &mtk_iommu_identity_domain,
1014 	.domain_alloc_paging = mtk_iommu_domain_alloc_paging,
1015 	.probe_device	= mtk_iommu_probe_device,
1016 	.release_device	= mtk_iommu_release_device,
1017 	.device_group	= mtk_iommu_device_group,
1018 	.of_xlate	= mtk_iommu_of_xlate,
1019 	.get_resv_regions = mtk_iommu_get_resv_regions,
1020 	.owner		= THIS_MODULE,
1021 	.default_domain_ops = &(const struct iommu_domain_ops) {
1022 		.attach_dev	= mtk_iommu_attach_device,
1023 		.map_pages	= mtk_iommu_map,
1024 		.unmap_pages	= mtk_iommu_unmap,
1025 		.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
1026 		.iotlb_sync	= mtk_iommu_iotlb_sync,
1027 		.iotlb_sync_map	= mtk_iommu_sync_map,
1028 		.iova_to_phys	= mtk_iommu_iova_to_phys,
1029 		.free		= mtk_iommu_domain_free,
1030 	}
1031 };
1032 
1033 static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid)
1034 {
1035 	const struct mtk_iommu_bank_data *bankx = &data->bank[bankid];
1036 	const struct mtk_iommu_bank_data *bank0 = &data->bank[0];
1037 	u32 regval;
1038 
1039 	/*
1040 	 * Global control settings are in bank0. These global registers may be
1041 	 * re-initialized here since it is not sure whether there are bank0 consumers.
1042 	 */
1043 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, TF_PORT_TO_ADDR_MT8173)) {
1044 		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
1045 			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
1046 	} else {
1047 		regval = readl_relaxed(bank0->base + REG_MMU_CTRL_REG);
1048 		regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
1049 	}
1050 	writel_relaxed(regval, bank0->base + REG_MMU_CTRL_REG);
1051 
1052 	if (data->enable_4GB &&
1053 	    MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
1054 		/*
1055 		 * If 4GB mode is enabled, the valid PA range is from
1056 		 * 0x1_0000_0000 to 0x1_ffff_ffff. Record bit[32:30] here.
1057 		 */
1058 		regval = F_MMU_VLD_PA_RNG(7, 4);
1059 		writel_relaxed(regval, bank0->base + REG_MMU_VLD_PA_RNG);
1060 	}
1061 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, DCM_DISABLE))
1062 		writel_relaxed(F_MMU_DCM, bank0->base + REG_MMU_DCM_DIS);
1063 	else
1064 		writel_relaxed(0, bank0->base + REG_MMU_DCM_DIS);
1065 
1066 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
1067 		/* write command throttling mode */
1068 		regval = readl_relaxed(bank0->base + REG_MMU_WR_LEN_CTRL);
1069 		regval &= ~F_MMU_WR_THROT_DIS_MASK;
1070 		writel_relaxed(regval, bank0->base + REG_MMU_WR_LEN_CTRL);
1071 	}
1072 
1073 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
1074 		/* The register is called STANDARD_AXI_MODE in this case */
1075 		regval = 0;
1076 	} else {
1077 		regval = readl_relaxed(bank0->base + REG_MMU_MISC_CTRL);
1078 		if (!MTK_IOMMU_HAS_FLAG(data->plat_data, STD_AXI_MODE))
1079 			regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
1080 		if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
1081 			regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
1082 	}
1083 	writel_relaxed(regval, bank0->base + REG_MMU_MISC_CTRL);
1084 
1085 	/* Independent settings for each bank */
1086 	regval = F_L2_MULIT_HIT_EN |
1087 		F_TABLE_WALK_FAULT_INT_EN |
1088 		F_PREETCH_FIFO_OVERFLOW_INT_EN |
1089 		F_MISS_FIFO_OVERFLOW_INT_EN |
1090 		F_PREFETCH_FIFO_ERR_INT_EN |
1091 		F_MISS_FIFO_ERR_INT_EN;
1092 	writel_relaxed(regval, bankx->base + REG_MMU_INT_CONTROL0);
1093 
1094 	regval = F_INT_TRANSLATION_FAULT |
1095 		F_INT_MAIN_MULTI_HIT_FAULT |
1096 		F_INT_INVALID_PA_FAULT |
1097 		F_INT_ENTRY_REPLACEMENT_FAULT |
1098 		F_INT_TLB_MISS_FAULT |
1099 		F_INT_MISS_TRANSACTION_FIFO_FAULT |
1100 		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
1101 	writel_relaxed(regval, bankx->base + REG_MMU_INT_MAIN_CONTROL);
1102 
1103 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
1104 		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
1105 	else
1106 		regval = lower_32_bits(data->protect_base) |
1107 			 upper_32_bits(data->protect_base);
1108 	writel_relaxed(regval, bankx->base + REG_MMU_IVRP_PADDR);
1109 
1110 	if (devm_request_irq(bankx->parent_dev, bankx->irq, mtk_iommu_isr, 0,
1111 			     dev_name(bankx->parent_dev), (void *)bankx)) {
1112 		writel_relaxed(0, bankx->base + REG_MMU_PT_BASE_ADDR);
1113 		dev_err(bankx->parent_dev, "Failed @ IRQ-%d Request\n", bankx->irq);
1114 		return -ENODEV;
1115 	}
1116 
1117 	return 0;
1118 }
1119 
1120 static const struct component_master_ops mtk_iommu_com_ops = {
1121 	.bind		= mtk_iommu_bind,
1122 	.unbind		= mtk_iommu_unbind,
1123 };
1124 
1125 static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **match,
1126 				  struct mtk_iommu_data *data)
1127 {
1128 	struct device_node *larbnode, *frst_avail_smicomm_node = NULL;
1129 	struct platform_device *plarbdev, *pcommdev;
1130 	struct device_link *link;
1131 	int i, larb_nr, ret;
1132 
1133 	larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL);
1134 	if (larb_nr < 0)
1135 		return larb_nr;
1136 	if (larb_nr == 0 || larb_nr > MTK_LARB_NR_MAX)
1137 		return -EINVAL;
1138 
1139 	for (i = 0; i < larb_nr; i++) {
1140 		struct device_node *smicomm_node, *smi_subcomm_node;
1141 		u32 id;
1142 
1143 		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
1144 		if (!larbnode) {
1145 			ret = -EINVAL;
1146 			goto err_larbdev_put;
1147 		}
1148 
1149 		if (!of_device_is_available(larbnode)) {
1150 			of_node_put(larbnode);
1151 			continue;
1152 		}
1153 
1154 		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
1155 		if (ret) /* The id is consecutive if this property is absent */
1156 			id = i;
1157 		if (id >= MTK_LARB_NR_MAX) {
1158 			of_node_put(larbnode);
1159 			ret = -EINVAL;
1160 			goto err_larbdev_put;
1161 		}
1162 
1163 		plarbdev = of_find_device_by_node(larbnode);
1164 		of_node_put(larbnode);
1165 		if (!plarbdev) {
1166 			ret = -ENODEV;
1167 			goto err_larbdev_put;
1168 		}
1169 		if (data->larb_imu[id].dev) {
1170 			platform_device_put(plarbdev);
1171 			ret = -EEXIST;
1172 			goto err_larbdev_put;
1173 		}
1174 		data->larb_imu[id].dev = &plarbdev->dev;
1175 
1176 		if (!plarbdev->dev.driver) {
1177 			ret = -EPROBE_DEFER;
1178 			goto err_larbdev_put;
1179 		}
1180 
1181 		/* Get smi-(sub)-common dev from the last larb. */
1182 		smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
1183 		if (!smi_subcomm_node) {
1184 			ret = -EINVAL;
1185 			goto err_larbdev_put;
1186 		}
1187 
1188 		/*
1189 		 * There may be two levels of smi-common. The node is an smi-sub-common
1190 		 * if it has another mediatek,smi property; otherwise it is the smi-common.
1191 		 */
1192 		smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0);
1193 		if (smicomm_node)
1194 			of_node_put(smi_subcomm_node);
1195 		else
1196 			smicomm_node = smi_subcomm_node;
1197 
1198 		/*
1199 		 * All the larbs that connect to one IOMMU must connect with the same
1200 		 * smi-common.
1201 		 */
1202 		if (!frst_avail_smicomm_node) {
1203 			frst_avail_smicomm_node = smicomm_node;
1204 		} else if (frst_avail_smicomm_node != smicomm_node) {
1205 			dev_err(dev, "mediatek,smi property is not right @larb%d.", id);
1206 			of_node_put(smicomm_node);
1207 			ret = -EINVAL;
1208 			goto err_larbdev_put;
1209 		} else {
1210 			of_node_put(smicomm_node);
1211 		}
1212 
1213 		component_match_add(dev, match, component_compare_dev, &plarbdev->dev);
1214 		platform_device_put(plarbdev);
1215 	}
1216 
1217 	if (!frst_avail_smicomm_node)
1218 		return -EINVAL;
1219 
1220 	pcommdev = of_find_device_by_node(frst_avail_smicomm_node);
1221 	of_node_put(frst_avail_smicomm_node);
1222 	if (!pcommdev)
1223 		return -ENODEV;
1224 	data->smicomm_dev = &pcommdev->dev;
1225 
1226 	link = device_link_add(data->smicomm_dev, dev,
1227 			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
1228 	platform_device_put(pcommdev);
1229 	if (!link) {
1230 		dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
1231 		return -EINVAL;
1232 	}
1233 	return 0;
1234 
1235 err_larbdev_put:
1236 	for (i = MTK_LARB_NR_MAX - 1; i >= 0; i--) {
1237 		if (!data->larb_imu[i].dev)
1238 			continue;
1239 		put_device(data->larb_imu[i].dev);
1240 	}
1241 	return ret;
1242 }
1243 
1244 static int mtk_iommu_probe(struct platform_device *pdev)
1245 {
1246 	struct mtk_iommu_data   *data;
1247 	struct device           *dev = &pdev->dev;
1248 	struct resource         *res;
1249 	resource_size_t		ioaddr;
1250 	struct component_match  *match = NULL;
1251 	struct regmap		*infracfg;
1252 	void                    *protect;
1253 	int                     ret, banks_num, i = 0;
1254 	u32			val;
1255 	char                    *p;
1256 	struct mtk_iommu_bank_data *bank;
1257 	void __iomem		*base;
1258 
1259 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
1260 	if (!data)
1261 		return -ENOMEM;
1262 	data->dev = dev;
1263 	data->plat_data = of_device_get_match_data(dev);
1264 
1265 	/* Protect memory. HW accesses this area on a translation fault. */
1266 	protect = devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN, GFP_KERNEL);
1267 	if (!protect)
1268 		return -ENOMEM;
1269 	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
1270 
1271 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
1272 		infracfg = syscon_regmap_lookup_by_phandle(dev->of_node, "mediatek,infracfg");
1273 		if (IS_ERR(infracfg)) {
1274 			/*
1275 			 * Legacy devicetrees will not specify a phandle to
1276 			 * mediatek,infracfg: in that case, we use the older
1277 			 * way to retrieve a syscon to infra.
1278 			 *
1279 			 * This is for retrocompatibility purposes only, hence
1280 			 * no more compatibles shall be added to this.
1281 			 */
1282 			switch (data->plat_data->m4u_plat) {
1283 			case M4U_MT2712:
1284 				p = "mediatek,mt2712-infracfg";
1285 				break;
1286 			case M4U_MT8173:
1287 				p = "mediatek,mt8173-infracfg";
1288 				break;
1289 			default:
1290 				p = NULL;
1291 			}
1292 
1293 			infracfg = syscon_regmap_lookup_by_compatible(p);
1294 			if (IS_ERR(infracfg))
1295 				return PTR_ERR(infracfg);
1296 		}
1297 
1298 		ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
1299 		if (ret)
1300 			return ret;
1301 		data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN);
1302 	}
1303 
1304 	banks_num = data->plat_data->banks_num;
1305 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1306 	if (!res)
1307 		return -EINVAL;
1308 	if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) {
1309 		dev_err(dev, "banknr %d. res %pR is not enough.\n", banks_num, res);
1310 		return -EINVAL;
1311 	}
1312 	base = devm_ioremap_resource(dev, res);
1313 	if (IS_ERR(base))
1314 		return PTR_ERR(base);
1315 	ioaddr = res->start;
1316 
1317 	data->bank = devm_kmalloc(dev, banks_num * sizeof(*data->bank), GFP_KERNEL);
1318 	if (!data->bank)
1319 		return -ENOMEM;
1320 
1321 	do {
1322 		if (!data->plat_data->banks_enable[i])
1323 			continue;
1324 		bank = &data->bank[i];
1325 		bank->id = i;
1326 		bank->base = base + i * MTK_IOMMU_BANK_SZ;
1327 		bank->m4u_dom = NULL;
1328 
1329 		bank->irq = platform_get_irq(pdev, i);
1330 		if (bank->irq < 0)
1331 			return bank->irq;
1332 		bank->parent_dev = dev;
1333 		bank->parent_data = data;
1334 		spin_lock_init(&bank->tlb_lock);
1335 	} while (++i < banks_num);
1336 
1337 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
1338 		data->bclk = devm_clk_get(dev, "bclk");
1339 		if (IS_ERR(data->bclk))
1340 			return PTR_ERR(data->bclk);
1341 	}
1342 
1343 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN)) {
1344 		ret = dma_set_mask(dev, DMA_BIT_MASK(35));
1345 		if (ret) {
1346 			dev_err(dev, "Failed to set dma_mask 35.\n");
1347 			return ret;
1348 		}
1349 	}
1350 
1351 	pm_runtime_enable(dev);
1352 
1353 	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
1354 		ret = mtk_iommu_mm_dts_parse(dev, &match, data);
1355 		if (ret) {
1356 			dev_err_probe(dev, ret, "mm dts parse fail\n");
1357 			goto out_runtime_disable;
1358 		}
1359 	} else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
1360 		   !MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) {
1361 		p = data->plat_data->pericfg_comp_str;
1362 		data->pericfg = syscon_regmap_lookup_by_compatible(p);
1363 		if (IS_ERR(data->pericfg)) {
1364 			ret = PTR_ERR(data->pericfg);
1365 			goto out_runtime_disable;
1366 		}
1367 	}
1368 
1369 	platform_set_drvdata(pdev, data);
1370 	mutex_init(&data->mutex);
1371 
1372 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) {
1373 		list_add_tail(&data->list, data->plat_data->hw_list);
1374 		data->hw_list = data->plat_data->hw_list;
1375 	} else {
1376 		INIT_LIST_HEAD(&data->hw_list_head);
1377 		list_add_tail(&data->list, &data->hw_list_head);
1378 		data->hw_list = &data->hw_list_head;
1379 	}
1380 
1381 	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
1382 				     "mtk-iommu.%pa", &ioaddr);
1383 	if (ret)
1384 		goto out_list_del;
1385 
1386 	ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
1387 	if (ret)
1388 		goto out_sysfs_remove;
1389 
1390 	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
1391 		ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
1392 		if (ret)
1393 			goto out_device_unregister;
1394 	}
1395 	return ret;
1396 
1397 out_device_unregister:
1398 	iommu_device_unregister(&data->iommu);
1399 out_sysfs_remove:
1400 	iommu_device_sysfs_remove(&data->iommu);
1401 out_list_del:
1402 	list_del(&data->list);
1403 	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
1404 		device_link_remove(data->smicomm_dev, dev);
1405 out_runtime_disable:
1406 	pm_runtime_disable(dev);
1407 	return ret;
1408 }
1409 
1410 static void mtk_iommu_remove(struct platform_device *pdev)
1411 {
1412 	struct mtk_iommu_data *data = platform_get_drvdata(pdev);
1413 	struct mtk_iommu_bank_data *bank;
1414 	int i;
1415 
1416 	iommu_device_sysfs_remove(&data->iommu);
1417 	iommu_device_unregister(&data->iommu);
1418 
1419 	list_del(&data->list);
1420 
1421 	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
1422 		device_link_remove(data->smicomm_dev, &pdev->dev);
1423 		component_master_del(&pdev->dev, &mtk_iommu_com_ops);
1424 	}
1425 	pm_runtime_disable(&pdev->dev);
1426 	for (i = 0; i < data->plat_data->banks_num; i++) {
1427 		bank = &data->bank[i];
1428 		if (!bank->m4u_dom)
1429 			continue;
1430 		devm_free_irq(&pdev->dev, bank->irq, bank);
1431 	}
1432 }
1433 
1434 static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev)
1435 {
1436 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
1437 	struct mtk_iommu_suspend_reg *reg = &data->reg;
1438 	void __iomem *base;
1439 	int i = 0;
1440 
1441 	base = data->bank[i].base;
1442 	reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
1443 	reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
1444 	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
1445 	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
1446 	reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
1447 	do {
1448 		if (!data->plat_data->banks_enable[i])
1449 			continue;
1450 		base = data->bank[i].base;
1451 		reg->int_control[i] = readl_relaxed(base + REG_MMU_INT_CONTROL0);
1452 		reg->int_main_control[i] = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
1453 		reg->ivrp_paddr[i] = readl_relaxed(base + REG_MMU_IVRP_PADDR);
1454 	} while (++i < data->plat_data->banks_num);
1455 	clk_disable_unprepare(data->bclk);
1456 	return 0;
1457 }
1458 
1459 static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
1460 {
1461 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
1462 	struct mtk_iommu_suspend_reg *reg = &data->reg;
1463 	struct mtk_iommu_domain *m4u_dom;
1464 	void __iomem *base;
1465 	int ret, i = 0;
1466 
1467 	ret = clk_prepare_enable(data->bclk);
1468 	if (ret) {
1469 		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
1470 		return ret;
1471 	}
1472 
1473 	/*
1474 	 * Upon first resume, only enable the clk and return, since the values of the
1475 	 * registers are not yet set.
1476 	 */
1477 	if (!reg->wr_len_ctrl)
1478 		return 0;
1479 
1480 	base = data->bank[i].base;
1481 	writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
1482 	writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
1483 	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
1484 	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
1485 	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
1486 	do {
1487 		m4u_dom = data->bank[i].m4u_dom;
1488 		if (!data->plat_data->banks_enable[i] || !m4u_dom)
1489 			continue;
1490 		base = data->bank[i].base;
1491 		writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0);
1492 		writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL);
1493 		writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR);
1494 		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr, base + REG_MMU_PT_BASE_ADDR);
1495 	} while (++i < data->plat_data->banks_num);
1496 
1497 	/*
1498 	 * Users may allocate dma buffer before they call pm_runtime_get,
1499 	 * in which case it will lack the necessary tlb flush.
1500 	 * Thus, make sure to update the tlb after each PM resume.
1501 	 */
1502 	mtk_iommu_tlb_flush_all(data);
1503 	return 0;
1504 }
1505 
1506 static const struct dev_pm_ops mtk_iommu_pm_ops = {
1507 	SET_RUNTIME_PM_OPS(mtk_iommu_runtime_suspend, mtk_iommu_runtime_resume, NULL)
1508 	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1509 				     pm_runtime_force_resume)
1510 };
1511 
1512 static const struct mtk_iommu_plat_data mt2712_data = {
1513 	.m4u_plat     = M4U_MT2712,
1514 	.flags        = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG | SHARE_PGTABLE |
1515 			MTK_IOMMU_TYPE_MM,
1516 	.hw_list      = &m4ulist,
1517 	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
1518 	.iova_region  = single_domain,
1519 	.banks_num    = 1,
1520 	.banks_enable = {true},
1521 	.iova_region_nr = ARRAY_SIZE(single_domain),
1522 	.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
1523 };
1524 
1525 static const struct mtk_iommu_plat_data mt6779_data = {
1526 	.m4u_plat      = M4U_MT6779,
1527 	.flags         = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN |
1528 			 MTK_IOMMU_TYPE_MM | PGTABLE_PA_35_EN,
1529 	.inv_sel_reg   = REG_MMU_INV_SEL_GEN2,
1530 	.banks_num    = 1,
1531 	.banks_enable = {true},
1532 	.iova_region   = single_domain,
1533 	.iova_region_nr = ARRAY_SIZE(single_domain),
1534 	.larbid_remap  = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
1535 };
1536 
1537 static const struct mtk_iommu_plat_data mt6795_data = {
1538 	.m4u_plat     = M4U_MT6795,
1539 	.flags	      = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
1540 			HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM |
1541 			TF_PORT_TO_ADDR_MT8173,
1542 	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
1543 	.banks_num    = 1,
1544 	.banks_enable = {true},
1545 	.iova_region  = single_domain,
1546 	.iova_region_nr = ARRAY_SIZE(single_domain),
1547 	.larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */
1548 };
1549 
1550 static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
1551 	[0] = {~0, ~0},				/* Region0: larb0/1 */
1552 	[1] = {0, 0, 0, 0, ~0, ~0, 0, ~0},	/* Region1: larb4/5/7 */
1553 	[2] = {0, 0, ~0, 0, 0, 0, 0, 0,		/* Region2: larb2/9/11/13/14/16/17/18/19/20 */
1554 	       0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0,
1555 	       ~0, ~0, ~0, ~0, ~0},
1556 	[3] = {0},
1557 	[4] = {[13] = BIT(9) | BIT(10)},	/* larb13 port9/10 */
1558 	[5] = {[14] = BIT(4) | BIT(5)},		/* larb14 port4/5 */
1559 };
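/*
 * Reading the table above together with mt8192_multi_dom: the entry
 * [4] = {[13] = BIT(9) | BIT(10)} means larb13 port9/10 belong to region 4,
 * i.e. the CCU0 range starting at 0x240000000, while the remaining larb13
 * ports (~(BIT(9) | BIT(10)) in row [2]) stay in region 2 (8G ~ 12G).
 */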
1560 
1561 static const struct mtk_iommu_plat_data mt6893_data = {
1562 	.m4u_plat     = M4U_MT8192,
1563 	.flags        = HAS_BCLK | OUT_ORDER_WR_EN | HAS_SUB_COMM_2BITS |
1564 			WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
1565 	.inv_sel_reg  = REG_MMU_INV_SEL_GEN2,
1566 	.banks_num    = 1,
1567 	.banks_enable = {true},
1568 	.iova_region  = mt8192_multi_dom,
1569 	.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
1570 	.iova_region_larb_msk = mt8192_larb_region_msk,
1571 	.larbid_remap    = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
1572 			    {0, 14, 16}, {0, 13, 18, 17}},
1573 };
1574 
1575 static const struct mtk_iommu_plat_data mt8167_data = {
1576 	.m4u_plat     = M4U_MT8167,
1577 	.flags        = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
1578 	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
1579 	.banks_num    = 1,
1580 	.banks_enable = {true},
1581 	.iova_region  = single_domain,
1582 	.iova_region_nr = ARRAY_SIZE(single_domain),
1583 	.larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */
1584 };
1585 
1586 static const struct mtk_iommu_plat_data mt8173_data = {
1587 	.m4u_plat     = M4U_MT8173,
1588 	.flags	      = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
1589 			HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM |
1590 			TF_PORT_TO_ADDR_MT8173,
1591 	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
1592 	.banks_num    = 1,
1593 	.banks_enable = {true},
1594 	.iova_region  = single_domain,
1595 	.iova_region_nr = ARRAY_SIZE(single_domain),
1596 	.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
1597 };
1598 
1599 static const struct mtk_iommu_plat_data mt8183_data = {
1600 	.m4u_plat     = M4U_MT8183,
1601 	.flags        = RESET_AXI | MTK_IOMMU_TYPE_MM,
1602 	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
1603 	.banks_num    = 1,
1604 	.banks_enable = {true},
1605 	.iova_region  = single_domain,
1606 	.iova_region_nr = ARRAY_SIZE(single_domain),
1607 	.larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
1608 };
1609 
1610 static const unsigned int mt8186_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
1611 	[0] = {~0, ~0, ~0},			/* Region0: all ports for larb0/1/2 */
1612 	[1] = {0, 0, 0, 0, ~0, 0, 0, ~0},	/* Region1: larb4/7 */
1613 	[2] = {0, 0, 0, 0, 0, 0, 0, 0,		/* Region2: larb8/9/11/13/16/17/19/20 */
1614 	       ~0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), 0, 0,
1615 						/* larb13: all ports except port9/10 */
1616 	       ~0, ~0, 0, ~0, ~0},
1617 	[3] = {0},
1618 	[4] = {[13] = BIT(9) | BIT(10)},	/* larb13 port9/10 */
1619 	[5] = {[14] = ~0},			/* larb14 */
1620 };
1621 
1622 static const struct mtk_iommu_plat_data mt8186_data_mm = {
1623 	.m4u_plat       = M4U_MT8186,
1624 	.flags          = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
1625 			  WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM | PGTABLE_PA_35_EN,
1626 	.larbid_remap   = {{0}, {1, MTK_INVALID_LARBID, 8}, {4}, {7}, {2}, {9, 11, 19, 20},
1627 			   {MTK_INVALID_LARBID, 14, 16},
1628 			   {MTK_INVALID_LARBID, 13, MTK_INVALID_LARBID, 17}},
1629 	.inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
1630 	.banks_num      = 1,
1631 	.banks_enable   = {true},
1632 	.iova_region    = mt8192_multi_dom,
1633 	.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
1634 	.iova_region_larb_msk = mt8186_larb_region_msk,
1635 };
1636 
1637 static const struct mtk_iommu_plat_data mt8188_data_infra = {
1638 	.m4u_plat         = M4U_MT8188,
1639 	.flags            = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO |
1640 			    MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT |
1641 			    PGTABLE_PA_35_EN | CFG_IFA_MASTER_IN_ATF,
1642 	.inv_sel_reg      = REG_MMU_INV_SEL_GEN2,
1643 	.banks_num        = 1,
1644 	.banks_enable     = {true},
1645 	.iova_region      = single_domain,
1646 	.iova_region_nr   = ARRAY_SIZE(single_domain),
1647 };
1648 
1649 static const u32 mt8188_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
1650 	[0] = {~0, ~0, ~0, ~0},               /* Region0: all ports for larb0/1/2/3 */
1651 	[1] = {0, 0, 0, 0, 0, 0, 0, 0,
1652 	       0, 0, 0, 0, 0, 0, 0, 0,
1653 	       0, 0, 0, 0, 0, ~0, ~0, ~0},    /* Region1: larb19(21)/21(22)/23 */
1654 	[2] = {0, 0, 0, 0, ~0, ~0, ~0, ~0,    /* Region2: the other larbs. */
1655 	       ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0,
1656 	       ~0, ~0, ~0, ~0, ~0, 0, 0, 0,
1657 	       0, ~0},
1658 	[3] = {0},
1659 	[4] = {[24] = BIT(0) | BIT(1)},       /* Only larb27(24) port0/1 */
1660 	[5] = {[24] = BIT(2) | BIT(3)},       /* Only larb27(24) port2/3 */
1661 };
1662 
1663 static const struct mtk_iommu_plat_data mt8188_data_vdo = {
1664 	.m4u_plat       = M4U_MT8188,
1665 	.flags          = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
1666 			  WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE |
1667 			  PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM,
1668 	.hw_list        = &m4ulist,
1669 	.inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
1670 	.banks_num      = 1,
1671 	.banks_enable   = {true},
1672 	.iova_region    = mt8192_multi_dom,
1673 	.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
1674 	.iova_region_larb_msk = mt8188_larb_region_msk,
1675 	.larbid_remap   = {{2}, {0}, {21}, {0}, {19}, {9, 10,
1676 			   11 /* 11a */, 25 /* 11c */},
1677 			   {13, 0, 29 /* 16b */, 30 /* 17b */, 0}, {5}},
1678 };
1679 
1680 static const struct mtk_iommu_plat_data mt8188_data_vpp = {
1681 	.m4u_plat       = M4U_MT8188,
1682 	.flags          = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
1683 			  WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE |
1684 			  PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM,
1685 	.hw_list        = &m4ulist,
1686 	.inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
1687 	.banks_num      = 1,
1688 	.banks_enable   = {true},
1689 	.iova_region    = mt8192_multi_dom,
1690 	.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
1691 	.iova_region_larb_msk = mt8188_larb_region_msk,
1692 	.larbid_remap   = {{1}, {3}, {23}, {7}, {MTK_INVALID_LARBID},
1693 			   {12, 15, 24 /* 11b */}, {14, MTK_INVALID_LARBID,
1694 			   16 /* 16a */, 17 /* 17a */, MTK_INVALID_LARBID,
1695 			   27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}},
1696 };
1697 
1698 static const struct mtk_iommu_plat_data mt8192_data = {
1699 	.m4u_plat       = M4U_MT8192,
1700 	.flags          = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
1701 			  WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM,
1702 	.inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
1703 	.banks_num      = 1,
1704 	.banks_enable   = {true},
1705 	.iova_region    = mt8192_multi_dom,
1706 	.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
1707 	.iova_region_larb_msk = mt8192_larb_region_msk,
1708 	.larbid_remap   = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
1709 			   {0, 14, 16}, {0, 13, 18, 17}},
1710 };
1711 
1712 static const struct mtk_iommu_plat_data mt8195_data_infra = {
1713 	.m4u_plat	  = M4U_MT8195,
1714 	.flags            = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO |
1715 			    MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT,
1716 	.pericfg_comp_str = "mediatek,mt8195-pericfg_ao",
1717 	.inv_sel_reg      = REG_MMU_INV_SEL_GEN2,
1718 	.banks_num	  = 5,
1719 	.banks_enable     = {true, false, false, false, true},
1720 	.banks_portmsk    = {[0] = GENMASK(19, 16),     /* PCIe */
1721 			     [4] = GENMASK(31, 20),     /* USB */
1722 			    },
1723 	.iova_region      = single_domain,
1724 	.iova_region_nr   = ARRAY_SIZE(single_domain),
1725 };
1726 
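/*
 * Editor's note on banks_portmsk above (a sketch, not a verbatim copy of the
 * driver's bank lookup): on the infra IOMMU each enabled bank serves the
 * masters whose port bits are set in its mask, so PCIe ports 16-19 land in
 * bank 0 and USB ports 20-31 in bank 4; conceptually:
 *
 *	for (b = 0; b < plat_data->banks_num; b++)
 *		if (plat_data->banks_portmsk[b] & BIT(portid))
 *			return b;	// master is served by bank b
 */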
1727 static const unsigned int mt8195_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
1728 	[0] = {~0, ~0, ~0, ~0},               /* Region0: all ports for larb0/1/2/3 */
1729 	[1] = {0, 0, 0, 0, 0, 0, 0, 0,
1730 	       0, 0, 0, 0, 0, 0, 0, 0,
1731 	       0, 0, 0, ~0, ~0, ~0, ~0, ~0,   /* Region1: larb19/20/21/22/23/24 */
1732 	       ~0},
1733 	[2] = {0, 0, 0, 0, ~0, ~0, ~0, ~0,    /* Region2: the other larbs. */
1734 	       ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0,
1735 	       ~0, ~0, 0, 0, 0, 0, 0, 0,
1736 	       0, ~0, ~0, ~0, ~0},
1737 	[3] = {0},
1738 	[4] = {[18] = BIT(0) | BIT(1)},       /* Only larb18 port0/1 */
1739 	[5] = {[18] = BIT(2) | BIT(3)},       /* Only larb18 port2/3 */
1740 };
1741 
1742 static const struct mtk_iommu_plat_data mt8195_data_vdo = {
1743 	.m4u_plat	= M4U_MT8195,
1744 	.flags          = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
1745 			  WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
1746 	.hw_list        = &m4ulist,
1747 	.inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
1748 	.banks_num      = 1,
1749 	.banks_enable   = {true},
1750 	.iova_region	= mt8192_multi_dom,
1751 	.iova_region_nr	= ARRAY_SIZE(mt8192_multi_dom),
1752 	.iova_region_larb_msk = mt8195_larb_region_msk,
1753 	.larbid_remap   = {{2, 0}, {21}, {24}, {7}, {19}, {9, 10, 11},
1754 			   {13, 17, 15/* 17b */, 25}, {5}},
1755 };
1756 
1757 static const struct mtk_iommu_plat_data mt8195_data_vpp = {
1758 	.m4u_plat	= M4U_MT8195,
1759 	.flags          = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
1760 			  WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
1761 	.hw_list        = &m4ulist,
1762 	.inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
1763 	.banks_num      = 1,
1764 	.banks_enable   = {true},
1765 	.iova_region	= mt8192_multi_dom,
1766 	.iova_region_nr	= ARRAY_SIZE(mt8192_multi_dom),
1767 	.iova_region_larb_msk = mt8195_larb_region_msk,
1768 	.larbid_remap   = {{1}, {3},
1769 			   {22, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 23},
1770 			   {8}, {20}, {12},
1771 			   /* 16: 16a; 29: 16b; 30: CCUtop0; 31: CCUtop1 */
1772 			   {14, 16, 29, 26, 30, 31, 18},
1773 			   {4, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 6}},
1774 };
1775 
1776 static const struct mtk_iommu_plat_data mt8365_data = {
1777 	.m4u_plat	= M4U_MT8365,
1778 	.flags		= RESET_AXI | INT_ID_PORT_WIDTH_6,
1779 	.inv_sel_reg	= REG_MMU_INV_SEL_GEN1,
1780 	.banks_num	= 1,
1781 	.banks_enable	= {true},
1782 	.iova_region	= single_domain,
1783 	.iova_region_nr	= ARRAY_SIZE(single_domain),
1784 	.larbid_remap	= {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
1785 };
1786 
1787 static const struct of_device_id mtk_iommu_of_ids[] = {
1788 	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
1789 	{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
1790 	{ .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data},
1791 	{ .compatible = "mediatek,mt6893-iommu-mm", .data = &mt6893_data},
1792 	{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
1793 	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
1794 	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
1795 	{ .compatible = "mediatek,mt8186-iommu-mm",    .data = &mt8186_data_mm}, /* mm: m4u */
1796 	{ .compatible = "mediatek,mt8188-iommu-infra", .data = &mt8188_data_infra},
1797 	{ .compatible = "mediatek,mt8188-iommu-vdo",   .data = &mt8188_data_vdo},
1798 	{ .compatible = "mediatek,mt8188-iommu-vpp",   .data = &mt8188_data_vpp},
1799 	{ .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
1800 	{ .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra},
1801 	{ .compatible = "mediatek,mt8195-iommu-vdo",   .data = &mt8195_data_vdo},
1802 	{ .compatible = "mediatek,mt8195-iommu-vpp",   .data = &mt8195_data_vpp},
1803 	{ .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data},
1804 	{}
1805 };
1806 MODULE_DEVICE_TABLE(of, mtk_iommu_of_ids);
1807 
1808 static struct platform_driver mtk_iommu_driver = {
1809 	.probe	= mtk_iommu_probe,
1810 	.remove = mtk_iommu_remove,
1811 	.driver	= {
1812 		.name = "mtk-iommu",
1813 		.of_match_table = mtk_iommu_of_ids,
1814 		.pm = &mtk_iommu_pm_ops,
1815 	}
1816 };
1817 module_platform_driver(mtk_iommu_driver);
1818 
1819 MODULE_DESCRIPTION("IOMMU API for MediaTek M4U implementations");
1820 MODULE_LICENSE("GPL v2");
1821