xref: /linux/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c (revision 115c0cc25130c59025cab8f8a5f1a3a97b1e16e1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2021-2024 NVIDIA CORPORATION & AFFILIATES. */
3 
4 #define dev_fmt(fmt) "tegra241_cmdqv: " fmt
5 
6 #include <linux/acpi.h>
7 #include <linux/debugfs.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/interrupt.h>
10 #include <linux/iommu.h>
11 #include <linux/iopoll.h>
12 
13 #include <acpi/acpixf.h>
14 
15 #include "arm-smmu-v3.h"
16 
17 /* CMDQV register page base and size defines */
18 #define TEGRA241_CMDQV_CONFIG_BASE	(0)
19 #define TEGRA241_CMDQV_CONFIG_SIZE	(SZ_64K)
20 #define TEGRA241_VCMDQ_PAGE0_BASE	(TEGRA241_CMDQV_CONFIG_BASE + SZ_64K)
21 #define TEGRA241_VCMDQ_PAGE1_BASE	(TEGRA241_VCMDQ_PAGE0_BASE + SZ_64K)
22 #define TEGRA241_VINTF_PAGE_BASE	(TEGRA241_VCMDQ_PAGE1_BASE + SZ_64K)
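/*
 * Resulting MMIO layout (64KB per region):
 *   0x00000..0x0FFFF  CMDQV configuration registers
 *   0x10000..0x1FFFF  VCMDQ PAGE0 (control/status, 0x80 stride per VCMDQ)
 *   0x20000..0x2FFFF  VCMDQ PAGE1 (queue base registers)
 *   0x30000..         per-VINTF logical-VCMDQ pages (128KB per VINTF)
 */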
23 
24 /* CMDQV global base regs */
25 #define TEGRA241_CMDQV_CONFIG		0x0000
26 #define  CMDQV_EN			BIT(0)
27 
28 #define TEGRA241_CMDQV_PARAM		0x0004
29 #define  CMDQV_NUM_VINTF_LOG2		GENMASK(11, 8)
30 #define  CMDQV_NUM_VCMDQ_LOG2		GENMASK(7, 4)
31 
32 #define TEGRA241_CMDQV_STATUS		0x0008
33 #define  CMDQV_ENABLED			BIT(0)
34 
35 #define TEGRA241_CMDQV_VINTF_ERR_MAP	0x0014
36 #define TEGRA241_CMDQV_VINTF_INT_MASK	0x001C
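/* Global VCMDQ error map; the ISR dumps CMDQ_ERR_MAP(0..3) when reporting errors */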
37 #define TEGRA241_CMDQV_CMDQ_ERR_MAP(m)  (0x0024 + 0x4*(m))
38 
39 #define TEGRA241_CMDQV_CMDQ_ALLOC(q)	(0x0200 + 0x4*(q))
40 #define  CMDQV_CMDQ_ALLOC_VINTF		GENMASK(20, 15)
41 #define  CMDQV_CMDQ_ALLOC_LVCMDQ	GENMASK(7, 1)
42 #define  CMDQV_CMDQ_ALLOCATED		BIT(0)
43 
44 /* VINTF base regs */
45 #define TEGRA241_VINTF(v)		(0x1000 + 0x100*(v))
46 
47 #define TEGRA241_VINTF_CONFIG		0x0000
48 #define  VINTF_HYP_OWN			BIT(17)
49 #define  VINTF_VMID			GENMASK(16, 1)
50 #define  VINTF_EN			BIT(0)
51 
52 #define TEGRA241_VINTF_STATUS		0x0004
53 #define  VINTF_STATUS			GENMASK(3, 1)
54 #define  VINTF_ENABLED			BIT(0)
55 
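/* Per-VINTF logical-VCMDQ error map, read as two 64-bit words by the error handler */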
56 #define TEGRA241_VINTF_LVCMDQ_ERR_MAP_64(m) \
57 					(0x00C0 + 0x8*(m))
58 #define  LVCMDQ_ERR_MAP_NUM_64		2
59 
60 /* VCMDQ base regs */
61 /* -- PAGE0 -- */
62 #define TEGRA241_VCMDQ_PAGE0(q)		(TEGRA241_VCMDQ_PAGE0_BASE + 0x80*(q))
63 
64 #define TEGRA241_VCMDQ_CONS		0x00000
65 #define  VCMDQ_CONS_ERR			GENMASK(30, 24)
66 
67 #define TEGRA241_VCMDQ_PROD		0x00004
68 
69 #define TEGRA241_VCMDQ_CONFIG		0x00008
70 #define  VCMDQ_EN			BIT(0)
71 
72 #define TEGRA241_VCMDQ_STATUS		0x0000C
73 #define  VCMDQ_ENABLED			BIT(0)
74 
75 #define TEGRA241_VCMDQ_GERROR		0x00010
76 #define TEGRA241_VCMDQ_GERRORN		0x00014
77 
78 /* -- PAGE1 -- */
79 #define TEGRA241_VCMDQ_PAGE1(q)		(TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
80 #define  VCMDQ_ADDR			GENMASK(47, 5)
81 #define  VCMDQ_LOG2SIZE			GENMASK(4, 0)
82 #define  VCMDQ_LOG2SIZE_MAX		19
83 
84 #define TEGRA241_VCMDQ_BASE		0x00000
85 #define TEGRA241_VCMDQ_CONS_INDX_BASE	0x00008
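/*
 * TEGRA241_VCMDQ_BASE in PAGE1 packs the queue's DMA address (VCMDQ_ADDR,
 * bits 47:5) with its log2 size (VCMDQ_LOG2SIZE, bits 4:0); see how q_base
 * is composed in tegra241_vcmdq_alloc_smmu_cmdq().
 */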
86 
87 /* VINTF logical-VCMDQ pages */
88 #define TEGRA241_VINTFi_PAGE0(i)	(TEGRA241_VINTF_PAGE_BASE + SZ_128K*(i))
89 #define TEGRA241_VINTFi_PAGE1(i)	(TEGRA241_VINTFi_PAGE0(i) + SZ_64K)
90 #define TEGRA241_VINTFi_LVCMDQ_PAGE0(i, q) \
91 					(TEGRA241_VINTFi_PAGE0(i) + 0x80*(q))
92 #define TEGRA241_VINTFi_LVCMDQ_PAGE1(i, q) \
93 					(TEGRA241_VINTFi_PAGE1(i) + 0x80*(q))
94 
95 /* MMIO helpers */
96 #define REG_CMDQV(_cmdqv, _regname) \
97 	((_cmdqv)->base + TEGRA241_CMDQV_##_regname)
98 #define REG_VINTF(_vintf, _regname) \
99 	((_vintf)->base + TEGRA241_VINTF_##_regname)
100 #define REG_VCMDQ_PAGE0(_vcmdq, _regname) \
101 	((_vcmdq)->page0 + TEGRA241_VCMDQ_##_regname)
102 #define REG_VCMDQ_PAGE1(_vcmdq, _regname) \
103 	((_vcmdq)->page1 + TEGRA241_VCMDQ_##_regname)
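/*
 * For example, REG_VCMDQ_PAGE0(vcmdq, PROD) resolves to vcmdq->page0 +
 * TEGRA241_VCMDQ_PROD, i.e. the PROD register in this VCMDQ's PAGE0 window.
 */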
104 
105 
106 static bool disable_cmdqv;
107 module_param(disable_cmdqv, bool, 0444);
108 MODULE_PARM_DESC(disable_cmdqv,
109 	"Disable the CMDQV hardware and fall back to the default SMMU internal CMDQ.");
110 
111 static bool bypass_vcmdq;
112 module_param(bypass_vcmdq, bool, 0444);
113 MODULE_PARM_DESC(bypass_vcmdq,
114 	"Bypass the VCMDQ and use the default SMMU internal CMDQ, for debugging or performance comparison.");
115 
116 /**
117  * struct tegra241_vcmdq - Virtual Command Queue
118  * @idx: Global index in the CMDQV
119  * @lidx: Local index in the VINTF
120  * @enabled: Enable status
121  * @cmdqv: Parent CMDQV pointer
122  * @vintf: Parent VINTF pointer
123  * @cmdq: Command Queue struct
124  * @page0: MMIO Page0 base address
125  * @page1: MMIO Page1 base address
126  */
127 struct tegra241_vcmdq {
128 	u16 idx;
129 	u16 lidx;
130 
131 	bool enabled;
132 
133 	struct tegra241_cmdqv *cmdqv;
134 	struct tegra241_vintf *vintf;
135 	struct arm_smmu_cmdq cmdq;
136 
137 	void __iomem *page0;
138 	void __iomem *page1;
139 };
140 
141 /**
142  * struct tegra241_vintf - Virtual Interface
143  * @idx: Global index in the CMDQV
144  * @enabled: Enable status
145  * @hyp_own: Owned by hypervisor (in-kernel)
146  * @cmdqv: Parent CMDQV pointer
147  * @lvcmdqs: List of logical VCMDQ pointers
148  * @base: MMIO base address
149  */
150 struct tegra241_vintf {
151 	u16 idx;
152 
153 	bool enabled;
154 	bool hyp_own;
155 
156 	struct tegra241_cmdqv *cmdqv;
157 	struct tegra241_vcmdq **lvcmdqs;
158 
159 	void __iomem *base;
160 };
161 
162 /**
163  * struct tegra241_cmdqv - CMDQ-V for SMMUv3
164  * @smmu: SMMUv3 device
165  * @dev: CMDQV device
166  * @base: MMIO base address
167  * @irq: IRQ number
168  * @num_vintfs: Total number of VINTFs
169  * @num_vcmdqs: Total number of VCMDQs
170  * @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF
171  * @vintf_ids: VINTF id allocator
172  * @vintfs: List of VINTFs
173  */
174 struct tegra241_cmdqv {
175 	struct arm_smmu_device smmu;
176 	struct device *dev;
177 
178 	void __iomem *base;
179 	int irq;
180 
181 	/* CMDQV Hardware Params */
182 	u16 num_vintfs;
183 	u16 num_vcmdqs;
184 	u16 num_lvcmdqs_per_vintf;
185 
186 	struct ida vintf_ids;
187 
188 	struct tegra241_vintf **vintfs;
189 };
190 
191 /* Config and Polling Helpers */
192 
193 static inline int tegra241_cmdqv_write_config(struct tegra241_cmdqv *cmdqv,
194 					      void __iomem *addr_config,
195 					      void __iomem *addr_status,
196 					      u32 regval, const char *header,
197 					      bool *out_enabled)
198 {
199 	bool en = regval & BIT(0);
200 	int ret;
201 
202 	writel(regval, addr_config);
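	/* Poll the paired STATUS register until bit 0 reflects the requested state */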
203 	ret = readl_poll_timeout(addr_status, regval,
204 				 en ? regval & BIT(0) : !(regval & BIT(0)),
205 				 1, ARM_SMMU_POLL_TIMEOUT_US);
206 	if (ret)
207 		dev_err(cmdqv->dev, "%sfailed to %sable, STATUS=0x%08X\n",
208 			header, en ? "en" : "dis", regval);
209 	if (out_enabled)
210 		WRITE_ONCE(*out_enabled, regval & BIT(0));
211 	return ret;
212 }
213 
214 static inline int cmdqv_write_config(struct tegra241_cmdqv *cmdqv, u32 regval)
215 {
216 	return tegra241_cmdqv_write_config(cmdqv,
217 					   REG_CMDQV(cmdqv, CONFIG),
218 					   REG_CMDQV(cmdqv, STATUS),
219 					   regval, "CMDQV: ", NULL);
220 }
221 
222 static inline int vintf_write_config(struct tegra241_vintf *vintf, u32 regval)
223 {
224 	char header[16];
225 
226 	snprintf(header, 16, "VINTF%u: ", vintf->idx);
227 	return tegra241_cmdqv_write_config(vintf->cmdqv,
228 					   REG_VINTF(vintf, CONFIG),
229 					   REG_VINTF(vintf, STATUS),
230 					   regval, header, &vintf->enabled);
231 }
232 
233 static inline char *lvcmdq_error_header(struct tegra241_vcmdq *vcmdq,
234 					char *header, int hlen)
235 {
236 	WARN_ON(hlen < 64);
237 	if (WARN_ON(!vcmdq->vintf))
238 		return "";
239 	snprintf(header, hlen, "VINTF%u: VCMDQ%u/LVCMDQ%u: ",
240 		 vcmdq->vintf->idx, vcmdq->idx, vcmdq->lidx);
241 	return header;
242 }
243 
244 static inline int vcmdq_write_config(struct tegra241_vcmdq *vcmdq, u32 regval)
245 {
246 	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
247 
248 	return tegra241_cmdqv_write_config(vcmdq->cmdqv,
249 					   REG_VCMDQ_PAGE0(vcmdq, CONFIG),
250 					   REG_VCMDQ_PAGE0(vcmdq, STATUS),
251 					   regval, h, &vcmdq->enabled);
252 }
253 
254 /* ISR Functions */
255 
256 static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf)
257 {
258 	int i;
259 
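	/* Each set bit in an LVCMDQ error map identifies a logical VCMDQ that hit an error */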
260 	for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
261 		u64 map = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
262 
263 		while (map) {
264 			unsigned long lidx = __ffs64(map);
265 			struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
266 			u32 gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
267 
268 			__arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq);
269 			writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
270 			map &= ~BIT_ULL(lidx);
271 		}
272 	}
273 }
274 
275 static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
276 {
277 	struct tegra241_cmdqv *cmdqv = (struct tegra241_cmdqv *)devid;
278 	void __iomem *reg_vintf_map = REG_CMDQV(cmdqv, VINTF_ERR_MAP);
279 	char err_str[256];
280 	u64 vintf_map;
281 
282 	/* Use readl_relaxed() as register addresses are not 64-bit aligned */
283 	vintf_map = (u64)readl_relaxed(reg_vintf_map + 0x4) << 32 |
284 		    (u64)readl_relaxed(reg_vintf_map);
285 
286 	snprintf(err_str, sizeof(err_str),
287 		 "vintf_map: %016llx, vcmdq_map %08x:%08x:%08x:%08x", vintf_map,
288 		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(3))),
289 		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(2))),
290 		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(1))),
291 		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(0))));
292 
293 	dev_warn(cmdqv->dev, "unexpected error reported. %s\n", err_str);
294 
295 	/* Handle VINTF0 and its LVCMDQs */
296 	if (vintf_map & BIT_ULL(0)) {
297 		tegra241_vintf0_handle_error(cmdqv->vintfs[0]);
298 		vintf_map &= ~BIT_ULL(0);
299 	}
300 
301 	return IRQ_HANDLED;
302 }
303 
304 /* Command Queue Function */
305 
306 static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
307 {
308 	switch (ent->opcode) {
309 	case CMDQ_OP_TLBI_NH_ASID:
310 	case CMDQ_OP_TLBI_NH_VA:
311 	case CMDQ_OP_ATC_INV:
312 		return true;
313 	default:
314 		return false;
315 	}
316 }
317 
318 static struct arm_smmu_cmdq *
319 tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
320 			struct arm_smmu_cmdq_ent *ent)
321 {
322 	struct tegra241_cmdqv *cmdqv =
323 		container_of(smmu, struct tegra241_cmdqv, smmu);
324 	struct tegra241_vintf *vintf = cmdqv->vintfs[0];
325 	struct tegra241_vcmdq *vcmdq;
326 	u16 lidx;
327 
328 	if (READ_ONCE(bypass_vcmdq))
329 		return NULL;
330 
331 	/* Use SMMU CMDQ if VINTF0 is uninitialized */
332 	if (!READ_ONCE(vintf->enabled))
333 		return NULL;
334 
335 	/*
336 	 * Select an LVCMDQ to use. This is a temporary scheme to balance
337 	 * out traffic on command issuing: each cmdq has its own lock, so
338 	 * if all CPUs issued command lists through the same cmdq, only
339 	 * one CPU at a time could make progress while the others would
340 	 * be spinning on that lock.
341 	 */
342 	lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
343 	vcmdq = vintf->lvcmdqs[lidx];
344 	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
345 		return NULL;
346 
347 	/* Unsupported commands fall back to the smmu->cmdq pathway */
348 	if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
349 		return NULL;
350 	return &vcmdq->cmdq;
351 }
352 
353 /* HW Reset Functions */
354 
355 static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
356 {
357 	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
358 	u32 gerrorn, gerror;
359 
360 	if (vcmdq_write_config(vcmdq, 0)) {
361 		dev_err(vcmdq->cmdqv->dev,
362 			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
363 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
364 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
365 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
366 	}
367 	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, PROD));
368 	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, CONS));
369 	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, BASE));
370 	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, CONS_INDX_BASE));
371 
372 	gerrorn = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN));
373 	gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
374 	if (gerror != gerrorn) {
375 		dev_warn(vcmdq->cmdqv->dev,
376 			 "%suncleared error detected, resetting\n", h);
377 		writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
378 	}
379 
380 	dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h);
381 }
382 
383 static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
384 {
385 	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
386 	int ret;
387 
388 	/* Reset VCMDQ */
389 	tegra241_vcmdq_hw_deinit(vcmdq);
390 
391 	/* Configure and enable VCMDQ */
392 	writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));
393 
394 	ret = vcmdq_write_config(vcmdq, VCMDQ_EN);
395 	if (ret) {
396 		dev_err(vcmdq->cmdqv->dev,
397 			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
398 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
399 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
400 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
401 		return ret;
402 	}
403 
404 	dev_dbg(vcmdq->cmdqv->dev, "%sinited\n", h);
405 	return 0;
406 }
407 
408 static void tegra241_vintf_hw_deinit(struct tegra241_vintf *vintf)
409 {
410 	u16 lidx;
411 
412 	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
413 		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx])
414 			tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
415 	vintf_write_config(vintf, 0);
416 }
417 
418 static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
419 {
420 	u32 regval;
421 	u16 lidx;
422 	int ret;
423 
424 	/* Reset VINTF */
425 	tegra241_vintf_hw_deinit(vintf);
426 
427 	/* Configure and enable VINTF */
428 	/*
429 	/*
430 	 * Note that the HYP_OWN bit is wired to zero when running in a guest
431 	 * kernel, regardless of what is written here, as a !HYP_OWN cmdq HW
432 	 * only supports a restricted set of commands.
433 	 */
434 	writel(regval, REG_VINTF(vintf, CONFIG));
435 
436 	ret = vintf_write_config(vintf, regval | VINTF_EN);
437 	if (ret)
438 		return ret;
439 	/*
440 	 * As mentioned above, the HYP_OWN bit is wired to zero for a guest
441 	 * kernel, so read it back from HW so that hyp_own reflects its real value.
442 	 */
443 	vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));
444 
445 	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
446 		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
447 			ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]);
448 			if (ret) {
449 				tegra241_vintf_hw_deinit(vintf);
450 				return ret;
451 			}
452 		}
453 	}
454 
455 	return 0;
456 }
457 
458 static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
459 {
460 	struct tegra241_cmdqv *cmdqv =
461 		container_of(smmu, struct tegra241_cmdqv, smmu);
462 	u16 qidx, lidx, idx;
463 	u32 regval;
464 	int ret;
465 
466 	/* Reset CMDQV */
467 	regval = readl_relaxed(REG_CMDQV(cmdqv, CONFIG));
468 	ret = cmdqv_write_config(cmdqv, regval & ~CMDQV_EN);
469 	if (ret)
470 		return ret;
471 	ret = cmdqv_write_config(cmdqv, regval | CMDQV_EN);
472 	if (ret)
473 		return ret;
474 
475 	/* Assign preallocated global VCMDQs to each VINTF as LVCMDQs */
476 	for (idx = 0, qidx = 0; idx < cmdqv->num_vintfs; idx++) {
477 		for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
478 			regval  = FIELD_PREP(CMDQV_CMDQ_ALLOC_VINTF, idx);
479 			regval |= FIELD_PREP(CMDQV_CMDQ_ALLOC_LVCMDQ, lidx);
480 			regval |= CMDQV_CMDQ_ALLOCATED;
481 			writel_relaxed(regval,
482 				       REG_CMDQV(cmdqv, CMDQ_ALLOC(qidx++)));
483 		}
484 	}
485 
486 	return tegra241_vintf_hw_init(cmdqv->vintfs[0], true);
487 }
488 
489 /* VCMDQ Resource Helpers */
490 
491 static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
492 {
493 	struct arm_smmu_queue *q = &vcmdq->cmdq.q;
494 	size_t nents = 1 << q->llq.max_n_shift;
495 	size_t qsz = nents << CMDQ_ENT_SZ_SHIFT;
496 
497 	if (!q->base)
498 		return;
499 	dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma);
500 }
501 
502 static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
503 {
504 	struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
505 	struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
506 	struct arm_smmu_queue *q = &cmdq->q;
507 	char name[16];
508 	int ret;
509 
510 	snprintf(name, 16, "vcmdq%u", vcmdq->idx);
511 
512 	/* Queue size, capped to ensure natural alignment */
513 	q->llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, VCMDQ_LOG2SIZE_MAX);
514 
515 	/* Use the common helper to init the VCMDQ, and then... */
516 	ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
517 				      TEGRA241_VCMDQ_PROD, TEGRA241_VCMDQ_CONS,
518 				      CMDQ_ENT_DWORDS, name);
519 	if (ret)
520 		return ret;
521 
522 	/* ...override q_base to write VCMDQ_BASE registers */
523 	q->q_base = q->base_dma & VCMDQ_ADDR;
524 	q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);
525 
526 	if (!vcmdq->vintf->hyp_own)
527 		cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;
528 
529 	return arm_smmu_cmdq_init(smmu, cmdq);
530 }
531 
532 /* VINTF Logical VCMDQ Resource Helpers */
533 
534 static void tegra241_vintf_deinit_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
535 {
536 	vintf->lvcmdqs[lidx] = NULL;
537 }
538 
539 static int tegra241_vintf_init_lvcmdq(struct tegra241_vintf *vintf, u16 lidx,
540 				      struct tegra241_vcmdq *vcmdq)
541 {
542 	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
543 	u16 idx = vintf->idx;
544 
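	/*
	 * The global VCMDQ index mirrors the CMDQ_ALLOC programming in
	 * tegra241_cmdqv_hw_reset(): VINTF index * LVCMDQs-per-VINTF + local index.
	 */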
545 	vcmdq->idx = idx * cmdqv->num_lvcmdqs_per_vintf + lidx;
546 	vcmdq->lidx = lidx;
547 	vcmdq->cmdqv = cmdqv;
548 	vcmdq->vintf = vintf;
549 	vcmdq->page0 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE0(idx, lidx);
550 	vcmdq->page1 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE1(idx, lidx);
551 
552 	vintf->lvcmdqs[lidx] = vcmdq;
553 	return 0;
554 }
555 
556 static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
557 {
558 	struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
559 	char header[64];
560 
561 	tegra241_vcmdq_free_smmu_cmdq(vcmdq);
562 	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
563 
564 	dev_dbg(vintf->cmdqv->dev,
565 		"%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64));
566 	kfree(vcmdq);
567 }
568 
569 static struct tegra241_vcmdq *
570 tegra241_vintf_alloc_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
571 {
572 	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
573 	struct tegra241_vcmdq *vcmdq;
574 	char header[64];
575 	int ret;
576 
577 	vcmdq = kzalloc(sizeof(*vcmdq), GFP_KERNEL);
578 	if (!vcmdq)
579 		return ERR_PTR(-ENOMEM);
580 
581 	ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
582 	if (ret)
583 		goto free_vcmdq;
584 
585 	/* Build an arm_smmu_cmdq for each LVCMDQ */
586 	ret = tegra241_vcmdq_alloc_smmu_cmdq(vcmdq);
587 	if (ret)
588 		goto deinit_lvcmdq;
589 
590 	dev_dbg(cmdqv->dev,
591 		"%sallocated\n", lvcmdq_error_header(vcmdq, header, 64));
592 	return vcmdq;
593 
594 deinit_lvcmdq:
595 	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
596 free_vcmdq:
597 	kfree(vcmdq);
598 	return ERR_PTR(ret);
599 }
600 
601 /* VINTF Resource Helpers */
602 
603 static void tegra241_cmdqv_deinit_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
604 {
605 	kfree(cmdqv->vintfs[idx]->lvcmdqs);
606 	ida_free(&cmdqv->vintf_ids, idx);
607 	cmdqv->vintfs[idx] = NULL;
608 }
609 
610 static int tegra241_cmdqv_init_vintf(struct tegra241_cmdqv *cmdqv, u16 max_idx,
611 				     struct tegra241_vintf *vintf)
612 {
613 
614 	u16 idx;
615 	int ret;
616 
617 	ret = ida_alloc_max(&cmdqv->vintf_ids, max_idx, GFP_KERNEL);
618 	if (ret < 0)
619 		return ret;
620 	idx = ret;
621 
622 	vintf->idx = idx;
623 	vintf->cmdqv = cmdqv;
624 	vintf->base = cmdqv->base + TEGRA241_VINTF(idx);
625 
626 	vintf->lvcmdqs = kcalloc(cmdqv->num_lvcmdqs_per_vintf,
627 				 sizeof(*vintf->lvcmdqs), GFP_KERNEL);
628 	if (!vintf->lvcmdqs) {
629 		ida_free(&cmdqv->vintf_ids, idx);
630 		return -ENOMEM;
631 	}
632 
633 	cmdqv->vintfs[idx] = vintf;
634 	return ret;
635 }
636 
637 /* Remove Helpers */
638 
639 static void tegra241_vintf_remove_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
640 {
641 	tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
642 	tegra241_vintf_free_lvcmdq(vintf, lidx);
643 }
644 
645 static void tegra241_cmdqv_remove_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
646 {
647 	struct tegra241_vintf *vintf = cmdqv->vintfs[idx];
648 	u16 lidx;
649 
650 	/* Remove LVCMDQ resources */
651 	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
652 		if (vintf->lvcmdqs[lidx])
653 			tegra241_vintf_remove_lvcmdq(vintf, lidx);
654 
655 	/* Remove VINTF resources */
656 	tegra241_vintf_hw_deinit(vintf);
657 
658 	dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx);
659 	tegra241_cmdqv_deinit_vintf(cmdqv, idx);
660 	kfree(vintf);
661 }
662 
663 static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu)
664 {
665 	struct tegra241_cmdqv *cmdqv =
666 		container_of(smmu, struct tegra241_cmdqv, smmu);
667 	u16 idx;
668 
669 	/* Remove VINTF resources */
670 	for (idx = 0; idx < cmdqv->num_vintfs; idx++) {
671 		if (cmdqv->vintfs[idx]) {
672 			/* Only vintf0 should remain at this stage */
673 			WARN_ON(idx > 0);
674 			tegra241_cmdqv_remove_vintf(cmdqv, idx);
675 		}
676 	}
677 
678 	/* Remove cmdqv resources */
679 	ida_destroy(&cmdqv->vintf_ids);
680 
681 	if (cmdqv->irq > 0)
682 		free_irq(cmdqv->irq, cmdqv);
683 	iounmap(cmdqv->base);
684 	kfree(cmdqv->vintfs);
685 	put_device(cmdqv->dev); /* smmu->impl_dev */
686 }
687 
688 static struct arm_smmu_impl_ops tegra241_cmdqv_impl_ops = {
689 	.get_secondary_cmdq = tegra241_cmdqv_get_cmdq,
690 	.device_reset = tegra241_cmdqv_hw_reset,
691 	.device_remove = tegra241_cmdqv_remove,
692 };
693 
694 /* Probe Functions */
695 
696 static int tegra241_cmdqv_acpi_is_memory(struct acpi_resource *res, void *data)
697 {
698 	struct resource_win win;
699 
700 	return !acpi_dev_resource_address_space(res, &win);
701 }
702 
703 static int tegra241_cmdqv_acpi_get_irqs(struct acpi_resource *ares, void *data)
704 {
705 	struct resource r;
706 	int *irq = data;
707 
708 	if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r))
709 		*irq = r.start;
710 	return 1; /* No need to add resource to the list */
711 }
712 
713 static struct resource *
714 tegra241_cmdqv_find_acpi_resource(struct device *dev, int *irq)
715 {
716 	struct acpi_device *adev = to_acpi_device(dev);
717 	struct list_head resource_list;
718 	struct resource_entry *rentry;
719 	struct resource *res = NULL;
720 	int ret;
721 
722 	INIT_LIST_HEAD(&resource_list);
723 	ret = acpi_dev_get_resources(adev, &resource_list,
724 				     tegra241_cmdqv_acpi_is_memory, NULL);
725 	if (ret < 0) {
726 		dev_err(dev, "failed to get memory resource: %d\n", ret);
727 		return NULL;
728 	}
729 
730 	rentry = list_first_entry_or_null(&resource_list,
731 					  struct resource_entry, node);
732 	if (!rentry) {
733 		dev_err(dev, "failed to get memory resource entry\n");
734 		goto free_list;
735 	}
736 
737 	/* Caller must free the res */
738 	res = kzalloc(sizeof(*res), GFP_KERNEL);
739 	if (!res)
740 		goto free_list;
741 
742 	*res = *rentry->res;
743 
744 	acpi_dev_free_resource_list(&resource_list);
745 
746 	INIT_LIST_HEAD(&resource_list);
747 
748 	if (irq)
749 		ret = acpi_dev_get_resources(adev, &resource_list,
750 					     tegra241_cmdqv_acpi_get_irqs, irq);
751 	if (ret < 0 || !irq || *irq <= 0)
752 		dev_warn(dev, "no interrupt. errors will not be reported\n");
753 
754 free_list:
755 	acpi_dev_free_resource_list(&resource_list);
756 	return res;
757 }
758 
759 static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
760 {
761 	struct tegra241_cmdqv *cmdqv =
762 		container_of(smmu, struct tegra241_cmdqv, smmu);
763 	struct tegra241_vintf *vintf;
764 	int lidx;
765 	int ret;
766 
767 	vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
768 	if (!vintf)
769 		goto out_fallback;
770 
771 	/* Init VINTF0 for in-kernel use */
772 	ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
773 	if (ret) {
774 		dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
775 		goto free_vintf;
776 	}
777 
778 	/* Preallocate logical VCMDQs to VINTF0 */
779 	for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
780 		struct tegra241_vcmdq *vcmdq;
781 
782 		vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
783 		if (IS_ERR(vcmdq))
784 			goto free_lvcmdq;
785 	}
786 
787 	/* Now, we are ready to run all the impl ops */
788 	smmu->impl_ops = &tegra241_cmdqv_impl_ops;
789 	return 0;
790 
791 free_lvcmdq:
792 	for (lidx--; lidx >= 0; lidx--)
793 		tegra241_vintf_free_lvcmdq(vintf, lidx);
794 	tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
795 free_vintf:
796 	kfree(vintf);
797 out_fallback:
798 	dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
799 	smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
800 	tegra241_cmdqv_remove(smmu);
801 	return 0;
802 }
803 
804 #ifdef CONFIG_IOMMU_DEBUGFS
805 static struct dentry *cmdqv_debugfs_dir;
806 #endif
807 
808 static struct arm_smmu_device *
809 __tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
810 		       int irq)
811 {
812 	static const struct arm_smmu_impl_ops init_ops = {
813 		.init_structures = tegra241_cmdqv_init_structures,
814 		.device_remove = tegra241_cmdqv_remove,
815 	};
816 	struct tegra241_cmdqv *cmdqv = NULL;
817 	struct arm_smmu_device *new_smmu;
818 	void __iomem *base;
819 	u32 regval;
820 	int ret;
821 
822 	static_assert(offsetof(struct tegra241_cmdqv, smmu) == 0);
823 
824 	base = ioremap(res->start, resource_size(res));
825 	if (!base) {
826 		dev_err(smmu->dev, "failed to ioremap\n");
827 		return NULL;
828 	}
829 
830 	regval = readl(base + TEGRA241_CMDQV_CONFIG);
831 	if (disable_cmdqv) {
832 		dev_info(smmu->dev, "Detected disable_cmdqv=true\n");
833 		writel(regval & ~CMDQV_EN, base + TEGRA241_CMDQV_CONFIG);
834 		goto iounmap;
835 	}
836 
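	/*
	 * Grow the arm_smmu_device allocation into a tegra241_cmdqv; smmu stays
	 * at offset 0 (see the static_assert above), so &cmdqv->smmu is handed
	 * back as the new SMMU device and container_of() can recover the cmdqv.
	 */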
837 	cmdqv = devm_krealloc(smmu->dev, smmu, sizeof(*cmdqv), GFP_KERNEL);
838 	if (!cmdqv)
839 		goto iounmap;
840 	new_smmu = &cmdqv->smmu;
841 
842 	cmdqv->irq = irq;
843 	cmdqv->base = base;
844 	cmdqv->dev = smmu->impl_dev;
845 
846 	if (cmdqv->irq > 0) {
847 		ret = request_irq(irq, tegra241_cmdqv_isr, 0, "tegra241-cmdqv",
848 				  cmdqv);
849 		if (ret) {
850 			dev_err(cmdqv->dev, "failed to request irq (%d): %d\n",
851 				cmdqv->irq, ret);
852 			goto iounmap;
853 		}
854 	}
855 
856 	regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
857 	cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval);
858 	cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval);
859 	cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs;
860 
861 	cmdqv->vintfs =
862 		kcalloc(cmdqv->num_vintfs, sizeof(*cmdqv->vintfs), GFP_KERNEL);
863 	if (!cmdqv->vintfs)
864 		goto free_irq;
865 
866 	ida_init(&cmdqv->vintf_ids);
867 
868 #ifdef CONFIG_IOMMU_DEBUGFS
869 	if (!cmdqv_debugfs_dir) {
870 		cmdqv_debugfs_dir =
871 			debugfs_create_dir("tegra241_cmdqv", iommu_debugfs_dir);
872 		debugfs_create_bool("bypass_vcmdq", 0644, cmdqv_debugfs_dir,
873 				    &bypass_vcmdq);
874 	}
875 #endif
876 
877 	/* Provide init-level ops only, until tegra241_cmdqv_init_structures */
878 	new_smmu->impl_ops = &init_ops;
879 
880 	return new_smmu;
881 
882 free_irq:
883 	if (cmdqv->irq > 0)
884 		free_irq(cmdqv->irq, cmdqv);
885 iounmap:
886 	iounmap(base);
887 	return NULL;
888 }
889 
890 struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
891 {
892 	struct arm_smmu_device *new_smmu;
893 	struct resource *res = NULL;
894 	int irq;
895 
896 	if (!smmu->dev->of_node)
897 		res = tegra241_cmdqv_find_acpi_resource(smmu->impl_dev, &irq);
898 	if (!res)
899 		goto out_fallback;
900 
901 	new_smmu = __tegra241_cmdqv_probe(smmu, res, irq);
902 	kfree(res);
903 
904 	if (new_smmu)
905 		return new_smmu;
906 
907 out_fallback:
908 	dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
909 	smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
910 	put_device(smmu->impl_dev);
911 	return ERR_PTR(-ENODEV);
912 }
913