xref: /linux/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c (revision 803e41f36d227022ab9bbe780c82283fd4713b2e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2021-2024 NVIDIA CORPORATION & AFFILIATES. */
3 
4 #define dev_fmt(fmt) "tegra241_cmdqv: " fmt
5 
6 #include <linux/debugfs.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/interrupt.h>
9 #include <linux/iommu.h>
10 #include <linux/iommufd.h>
11 #include <linux/iopoll.h>
12 #include <linux/platform_device.h>
13 #include <uapi/linux/iommufd.h>
14 
15 #include "arm-smmu-v3.h"
16 
17 /* CMDQV register page base and size defines */
18 #define TEGRA241_CMDQV_CONFIG_BASE	(0)
19 #define TEGRA241_CMDQV_CONFIG_SIZE	(SZ_64K)
20 #define TEGRA241_VCMDQ_PAGE0_BASE	(TEGRA241_CMDQV_CONFIG_BASE + SZ_64K)
21 #define TEGRA241_VCMDQ_PAGE1_BASE	(TEGRA241_VCMDQ_PAGE0_BASE + SZ_64K)
22 #define TEGRA241_VINTF_PAGE_BASE	(TEGRA241_VCMDQ_PAGE1_BASE + SZ_64K)
23 
24 /* CMDQV global base regs */
25 #define TEGRA241_CMDQV_CONFIG		0x0000
26 #define  CMDQV_EN			BIT(0)
27 
28 #define TEGRA241_CMDQV_PARAM		0x0004
29 #define  CMDQV_NUM_SID_PER_VM_LOG2	GENMASK(15, 12)
30 #define  CMDQV_NUM_VINTF_LOG2		GENMASK(11, 8)
31 #define  CMDQV_NUM_VCMDQ_LOG2		GENMASK(7, 4)
32 #define  CMDQV_VER			GENMASK(3, 0)
33 
34 #define TEGRA241_CMDQV_STATUS		0x0008
35 #define  CMDQV_ENABLED			BIT(0)
36 
37 #define TEGRA241_CMDQV_VINTF_ERR_MAP	0x0014
38 #define TEGRA241_CMDQV_VINTF_INT_MASK	0x001C
39 #define TEGRA241_CMDQV_CMDQ_ERR_MAP(m)  (0x0024 + 0x4*(m))
40 
41 #define TEGRA241_CMDQV_CMDQ_ALLOC(q)	(0x0200 + 0x4*(q))
42 #define  CMDQV_CMDQ_ALLOC_VINTF		GENMASK(20, 15)
43 #define  CMDQV_CMDQ_ALLOC_LVCMDQ	GENMASK(7, 1)
44 #define  CMDQV_CMDQ_ALLOCATED		BIT(0)
45 
46 /* VINTF base regs */
47 #define TEGRA241_VINTF(v)		(0x1000 + 0x100*(v))
48 
49 #define TEGRA241_VINTF_CONFIG		0x0000
50 #define  VINTF_HYP_OWN			BIT(17)
51 #define  VINTF_VMID			GENMASK(16, 1)
52 #define  VINTF_EN			BIT(0)
53 
54 #define TEGRA241_VINTF_STATUS		0x0004
55 #define  VINTF_STATUS			GENMASK(3, 1)
56 #define  VINTF_ENABLED			BIT(0)
57 
58 #define TEGRA241_VINTF_SID_MATCH(s)	(0x0040 + 0x4*(s))
59 #define TEGRA241_VINTF_SID_REPLACE(s)	(0x0080 + 0x4*(s))
60 
61 #define TEGRA241_VINTF_LVCMDQ_ERR_MAP_64(m) \
62 					(0x00C0 + 0x8*(m))
63 #define  LVCMDQ_ERR_MAP_NUM_64		2
64 
65 /* VCMDQ base regs */
66 /* -- PAGE0 -- */
67 #define TEGRA241_VCMDQ_PAGE0(q)		(TEGRA241_VCMDQ_PAGE0_BASE + 0x80*(q))
68 
69 #define TEGRA241_VCMDQ_CONS		0x00000
70 #define  VCMDQ_CONS_ERR			GENMASK(30, 24)
71 
72 #define TEGRA241_VCMDQ_PROD		0x00004
73 
74 #define TEGRA241_VCMDQ_CONFIG		0x00008
75 #define  VCMDQ_EN			BIT(0)
76 
77 #define TEGRA241_VCMDQ_STATUS		0x0000C
78 #define  VCMDQ_ENABLED			BIT(0)
79 
80 #define TEGRA241_VCMDQ_GERROR		0x00010
81 #define TEGRA241_VCMDQ_GERRORN		0x00014
82 
83 /* -- PAGE1 -- */
84 #define TEGRA241_VCMDQ_PAGE1(q)		(TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
85 #define  VCMDQ_ADDR			GENMASK(47, 5)
86 #define  VCMDQ_LOG2SIZE			GENMASK(4, 0)
87 
88 #define TEGRA241_VCMDQ_BASE		0x00000
89 #define TEGRA241_VCMDQ_CONS_INDX_BASE	0x00008
90 
91 /* VINTF logical-VCMDQ pages */
92 #define TEGRA241_VINTFi_PAGE0(i)	(TEGRA241_VINTF_PAGE_BASE + SZ_128K*(i))
93 #define TEGRA241_VINTFi_PAGE1(i)	(TEGRA241_VINTFi_PAGE0(i) + SZ_64K)
94 #define TEGRA241_VINTFi_LVCMDQ_PAGE0(i, q) \
95 					(TEGRA241_VINTFi_PAGE0(i) + 0x80*(q))
96 #define TEGRA241_VINTFi_LVCMDQ_PAGE1(i, q) \
97 					(TEGRA241_VINTFi_PAGE1(i) + 0x80*(q))
98 
99 /* MMIO helpers */
100 #define REG_CMDQV(_cmdqv, _regname) \
101 	((_cmdqv)->base + TEGRA241_CMDQV_##_regname)
102 #define REG_VINTF(_vintf, _regname) \
103 	((_vintf)->base + TEGRA241_VINTF_##_regname)
104 #define REG_VCMDQ_PAGE0(_vcmdq, _regname) \
105 	((_vcmdq)->page0 + TEGRA241_VCMDQ_##_regname)
106 #define REG_VCMDQ_PAGE1(_vcmdq, _regname) \
107 	((_vcmdq)->page1 + TEGRA241_VCMDQ_##_regname)
108 
109 
110 static bool disable_cmdqv;
111 module_param(disable_cmdqv, bool, 0444);
112 MODULE_PARM_DESC(disable_cmdqv,
113 	"This allows to disable CMDQV HW and use default SMMU internal CMDQ.");
114 
115 static bool bypass_vcmdq;
116 module_param(bypass_vcmdq, bool, 0444);
117 MODULE_PARM_DESC(bypass_vcmdq,
118 	"This allows to bypass VCMDQ for debugging use or perf comparison.");
119 
/**
 * struct tegra241_vcmdq - Virtual Command Queue
 * @core: Embedded iommufd_hw_queue structure
 * @idx: Global index in the CMDQV
 * @lidx: Local index in the VINTF
 * @enabled: Enable status, updated from the HW STATUS readback by
 *           vcmdq_write_config(); read locklessly with READ_ONCE()
 * @cmdqv: Parent CMDQV pointer
 * @vintf: Parent VINTF pointer
 * @prev: Previous LVCMDQ to depend on
 * @cmdq: Command Queue struct
 * @page0: MMIO Page0 base address (PROD/CONS/CONFIG/STATUS/GERROR regs)
 * @page1: MMIO Page1 base address (BASE/CONS_INDX_BASE regs)
 */
struct tegra241_vcmdq {
	struct iommufd_hw_queue core;

	u16 idx;
	u16 lidx;

	bool enabled;

	struct tegra241_cmdqv *cmdqv;
	struct tegra241_vintf *vintf;
	struct tegra241_vcmdq *prev;
	struct arm_smmu_cmdq cmdq;

	void __iomem *page0;
	void __iomem *page1;
};
#define hw_queue_to_vcmdq(v) container_of(v, struct tegra241_vcmdq, core)
150 
/**
 * struct tegra241_vintf - Virtual Interface
 * @vsmmu: Embedded arm_vsmmu structure
 * @idx: Global index in the CMDQV
 * @enabled: Enable status, updated from the HW STATUS readback by
 *           vintf_write_config(); read locklessly with READ_ONCE()
 * @hyp_own: Owned by hypervisor (in-kernel); read back from HW since the
 *           HYP_OWN bit is wired to zero in a guest kernel
 * @cmdqv: Parent CMDQV pointer
 * @lvcmdqs: List of logical VCMDQ pointers
 * @lvcmdq_mutex: Lock to serialize user-allocated lvcmdqs
 * @base: MMIO base address
 * @mmap_offset: Offset argument for mmap() syscall
 * @sids: Stream ID mapping resources
 */
struct tegra241_vintf {
	struct arm_vsmmu vsmmu;

	u16 idx;

	bool enabled;
	bool hyp_own;

	struct tegra241_cmdqv *cmdqv;
	struct tegra241_vcmdq **lvcmdqs;
	struct mutex lvcmdq_mutex; /* user space race */

	void __iomem *base;
	unsigned long mmap_offset;

	struct ida sids;
};
#define viommu_to_vintf(v) container_of(v, struct tegra241_vintf, vsmmu.core)
182 
/**
 * struct tegra241_vintf_sid - Virtual Interface Stream ID Mapping
 * @core: Embedded iommufd_vdevice structure, holding virtual Stream ID
 * @vintf: Parent VINTF pointer
 * @sid: Physical Stream ID
 * @idx: Mapping index in the VINTF (selects the SID_MATCH/SID_REPLACE pair)
 */
struct tegra241_vintf_sid {
	struct iommufd_vdevice core;
	struct tegra241_vintf *vintf;
	u32 sid;
	u8 idx;
};
#define vdev_to_vsid(v) container_of(v, struct tegra241_vintf_sid, core)
197 
/**
 * struct tegra241_cmdqv - CMDQ-V for SMMUv3
 * @smmu: SMMUv3 device (must be the first member; see the static_assert in
 *        __tegra241_cmdqv_probe())
 * @dev: CMDQV device
 * @base: MMIO base address
 * @base_phys: MMIO physical base address, for mmap
 * @irq: IRQ number
 * @num_vintfs: Total number of VINTFs
 * @num_vcmdqs: Total number of VCMDQs
 * @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF
 *                         (num_vcmdqs / num_vintfs, computed at probe)
 * @num_sids_per_vintf: Total number of SID mappings per VINTF
 * @vintf_ids: VINTF id allocator
 * @vintfs: List of VINTFs
 */
struct tegra241_cmdqv {
	struct arm_smmu_device smmu;
	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	int irq;

	/* CMDQV Hardware Params */
	u16 num_vintfs;
	u16 num_vcmdqs;
	u16 num_lvcmdqs_per_vintf;
	u16 num_sids_per_vintf;

	struct ida vintf_ids;

	struct tegra241_vintf **vintfs;
};
230 
231 /* Config and Polling Helpers */
232 
/*
 * Write @regval to the CONFIG register at @addr_config, then poll bit0 of the
 * STATUS register at @addr_status until it reflects the requested state. All
 * three register pairs (CMDQV, VINTF, VCMDQ) keep their EN/ENABLED flags in
 * bit0, which is why BIT(0) is used generically here.
 *
 * @header: Prefix for the timeout error message, e.g. "VINTF0: "
 * @out_enabled: Optional pointer receiving the final STATUS bit0; stored with
 *               WRITE_ONCE() since lockless readers (e.g. the cmdq-selection
 *               fast path) load the flag with READ_ONCE()
 *
 * Returns 0 on success, or the readl_poll_timeout() error on timeout.
 */
static inline int tegra241_cmdqv_write_config(struct tegra241_cmdqv *cmdqv,
					      void __iomem *addr_config,
					      void __iomem *addr_status,
					      u32 regval, const char *header,
					      bool *out_enabled)
{
	bool en = regval & BIT(0);
	int ret;

	writel(regval, addr_config);
	/* Note: readl_poll_timeout() reuses @regval for the STATUS readback */
	ret = readl_poll_timeout(addr_status, regval,
				 en ? regval & BIT(0) : !(regval & BIT(0)),
				 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		dev_err(cmdqv->dev, "%sfailed to %sable, STATUS=0x%08X\n",
			header, en ? "en" : "dis", regval);
	if (out_enabled)
		WRITE_ONCE(*out_enabled, regval & BIT(0));
	return ret;
}
253 
254 static inline int cmdqv_write_config(struct tegra241_cmdqv *cmdqv, u32 regval)
255 {
256 	return tegra241_cmdqv_write_config(cmdqv,
257 					   REG_CMDQV(cmdqv, CONFIG),
258 					   REG_CMDQV(cmdqv, STATUS),
259 					   regval, "CMDQV: ", NULL);
260 }
261 
262 static inline int vintf_write_config(struct tegra241_vintf *vintf, u32 regval)
263 {
264 	char header[16];
265 
266 	snprintf(header, 16, "VINTF%u: ", vintf->idx);
267 	return tegra241_cmdqv_write_config(vintf->cmdqv,
268 					   REG_VINTF(vintf, CONFIG),
269 					   REG_VINTF(vintf, STATUS),
270 					   regval, header, &vintf->enabled);
271 }
272 
273 static inline char *lvcmdq_error_header(struct tegra241_vcmdq *vcmdq,
274 					char *header, int hlen)
275 {
276 	WARN_ON(hlen < 64);
277 	if (WARN_ON(!vcmdq->vintf))
278 		return "";
279 	snprintf(header, hlen, "VINTF%u: VCMDQ%u/LVCMDQ%u: ",
280 		 vcmdq->vintf->idx, vcmdq->idx, vcmdq->lidx);
281 	return header;
282 }
283 
284 static inline int vcmdq_write_config(struct tegra241_vcmdq *vcmdq, u32 regval)
285 {
286 	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
287 
288 	return tegra241_cmdqv_write_config(vcmdq->cmdqv,
289 					   REG_VCMDQ_PAGE0(vcmdq, CONFIG),
290 					   REG_VCMDQ_PAGE0(vcmdq, STATUS),
291 					   regval, h, &vcmdq->enabled);
292 }
293 
294 /* ISR Functions */
295 
296 static void tegra241_vintf_user_handle_error(struct tegra241_vintf *vintf)
297 {
298 	struct iommufd_viommu *viommu = &vintf->vsmmu.core;
299 	struct iommu_vevent_tegra241_cmdqv vevent_data;
300 	int i;
301 
302 	for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
303 		u64 err = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
304 
305 		vevent_data.lvcmdq_err_map[i] = cpu_to_le64(err);
306 	}
307 
308 	iommufd_viommu_report_event(viommu, IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV,
309 				    &vevent_data, sizeof(vevent_data));
310 }
311 
/*
 * Handle errors on the kernel-owned VINTF0: for every LVCMDQ flagged in the
 * 64-bit error maps, let the common SMMU helper skip the faulty command, then
 * acknowledge the error by writing GERROR back to GERRORN.
 */
static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf)
{
	int i;

	for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
		u64 map = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));

		while (map) {
			/*
			 * NOTE(review): lidx is the bit position within map i;
			 * this indexing presumes all active LVCMDQs fall in
			 * map 0 (i.e. <= 64 LVCMDQs per VINTF) -- confirm.
			 */
			unsigned long lidx = __ffs64(map);
			struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
			u32 gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));

			__arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq);
			/* GERRORN == GERROR signals "error consumed" to HW */
			writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
			map &= ~BIT_ULL(lidx);
		}
	}
}
330 
/*
 * Threaded IRQ handler (registered via request_threaded_irq()): snapshot and
 * log the global VINTF/VCMDQ error maps, then dispatch per-VINTF handling.
 * VINTF0 errors are fixed up in-kernel; errors on user-owned VINTFs are
 * forwarded to user space as vEVENTs.
 */
static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
{
	struct tegra241_cmdqv *cmdqv = (struct tegra241_cmdqv *)devid;
	void __iomem *reg_vintf_map = REG_CMDQV(cmdqv, VINTF_ERR_MAP);
	char err_str[256];
	u64 vintf_map;

	/* Use readl_relaxed() as register addresses are not 64-bit aligned */
	vintf_map = (u64)readl_relaxed(reg_vintf_map + 0x4) << 32 |
		    (u64)readl_relaxed(reg_vintf_map);

	snprintf(err_str, sizeof(err_str),
		 "vintf_map: %016llx, vcmdq_map %08x:%08x:%08x:%08x", vintf_map,
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(3))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(2))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(1))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(0))));

	dev_warn(cmdqv->dev, "unexpected error reported. %s\n", err_str);

	/* Handle VINTF0 and its LVCMDQs */
	if (vintf_map & BIT_ULL(0)) {
		tegra241_vintf0_handle_error(cmdqv->vintfs[0]);
		vintf_map &= ~BIT_ULL(0);
	}

	/* Handle other user VINTFs and their LVCMDQs */
	while (vintf_map) {
		unsigned long idx = __ffs64(vintf_map);

		tegra241_vintf_user_handle_error(cmdqv->vintfs[idx]);
		vintf_map &= ~BIT_ULL(idx);
	}

	return IRQ_HANDLED;
}
367 
368 /* Command Queue Function */
369 
370 static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
371 {
372 	switch (ent->opcode) {
373 	case CMDQ_OP_TLBI_NH_ASID:
374 	case CMDQ_OP_TLBI_NH_VA:
375 	case CMDQ_OP_ATC_INV:
376 		return true;
377 	default:
378 		return false;
379 	}
380 }
381 
/*
 * Impl op (get_secondary_cmdq): pick an enabled LVCMDQ of VINTF0 for issuing
 * @ent, or return NULL to make the caller fall back to the standard SMMU CMDQ.
 * Runs locklessly on the command-issue fast path, hence the READ_ONCE()s
 * pairing with the WRITE_ONCE()s in tegra241_cmdqv_write_config().
 */
static struct arm_smmu_cmdq *
tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
			struct arm_smmu_cmdq_ent *ent)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf = cmdqv->vintfs[0];
	struct tegra241_vcmdq *vcmdq;
	u16 lidx;

	if (READ_ONCE(bypass_vcmdq))
		return NULL;

	/* Use SMMU CMDQ if VINTF0 is uninitialized */
	if (!READ_ONCE(vintf->enabled))
		return NULL;

	/*
	 * Select a LVCMDQ to use. Here we use a temporal solution to
	 * balance out traffic on cmdq issuing: each cmdq has its own
	 * lock, if all cpus issue cmdlist using the same cmdq, only
	 * one CPU at a time can enter the process, while the others
	 * will be spinning at the same lock.
	 */
	lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
	vcmdq = vintf->lvcmdqs[lidx];
	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
		return NULL;

	/* Unsupported CMD goes for smmu->cmdq pathway */
	if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
		return NULL;
	return &vcmdq->cmdq;
}
416 
417 /* HW Reset Functions */
418 
/*
 * When a guest-owned VCMDQ is disabled, if the guest did not enqueue a CMD_SYNC
 * following an ATC_INV command at the end of the guest queue while this ATC_INV
 * is timed out, the TIMEOUT will not be reported until this VCMDQ gets assigned
 * to the next VM, which will be a false alarm potentially causing some unwanted
 * behavior in the new VM. Thus, a guest-owned VCMDQ must flush the TIMEOUT when
 * it gets disabled. This can be done by just issuing a CMD_SYNC to SMMU CMDQ.
 */
static void tegra241_vcmdq_hw_flush_timeout(struct tegra241_vcmdq *vcmdq)
{
	struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
	u64 cmd_sync[CMDQ_ENT_DWORDS] = {};

	/* CS_NONE: this CMD_SYNC itself does not need a completion signal */
	cmd_sync[0] = FIELD_PREP(CMDQ_0_OP, CMDQ_OP_CMD_SYNC) |
		      FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_NONE);

	/*
	 * It does not hurt to insert another CMD_SYNC, taking advantage of the
	 * arm_smmu_cmdq_issue_cmdlist() that waits for the CMD_SYNC completion.
	 */
	arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, cmd_sync, 1, true);
}
441 
/* This function is for LVCMDQ, so @vcmdq must not be unmapped yet */
static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
	u32 gerrorn, gerror;

	/* Disable the queue; dump its error state if the disable times out */
	if (vcmdq_write_config(vcmdq, 0)) {
		dev_err(vcmdq->cmdqv->dev,
			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
	}
	/* Flush a potentially pending ATC_INV TIMEOUT (see comment above) */
	tegra241_vcmdq_hw_flush_timeout(vcmdq);

	/* Reset the queue indexes and clear both base registers */
	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, PROD));
	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, CONS));
	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, BASE));
	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, CONS_INDX_BASE));

	/* Acknowledge any error left unconsumed: GERRORN == GERROR is idle */
	gerrorn = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN));
	gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
	if (gerror != gerrorn) {
		dev_warn(vcmdq->cmdqv->dev,
			 "%suncleared error detected, resetting\n", h);
		writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
	}

	dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h);
}
472 
/* This function is for LVCMDQ, so @vcmdq must be mapped prior */
static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
	int ret;

	/* Reset VCMDQ */
	tegra241_vcmdq_hw_deinit(vcmdq);

	/* vintf->hyp_own is a HW state finalized in tegra241_vintf_hw_init() */
	if (!vcmdq->vintf->hyp_own)
		vcmdq->cmdq.supports_cmd = tegra241_guest_vcmdq_supports_cmd;

	/* Configure and enable VCMDQ */
	writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));

	/* Enable the queue; dump its error state if the enable times out */
	ret = vcmdq_write_config(vcmdq, VCMDQ_EN);
	if (ret) {
		dev_err(vcmdq->cmdqv->dev,
			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
		return ret;
	}

	dev_dbg(vcmdq->cmdqv->dev, "%sinited\n", h);
	return 0;
}
502 
503 /* Unmap a global VCMDQ from the pre-assigned LVCMDQ */
504 static void tegra241_vcmdq_unmap_lvcmdq(struct tegra241_vcmdq *vcmdq)
505 {
506 	u32 regval = readl(REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
507 	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
508 
509 	writel(regval & ~CMDQV_CMDQ_ALLOCATED,
510 	       REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
511 	dev_dbg(vcmdq->cmdqv->dev, "%sunmapped\n", h);
512 }
513 
/* Disable a VINTF: tear down its LVCMDQ mappings and clear its SID slots */
static void tegra241_vintf_hw_deinit(struct tegra241_vintf *vintf)
{
	u16 lidx = vintf->cmdqv->num_lvcmdqs_per_vintf;
	int sidx;

	/* HW requires to unmap LVCMDQs in descending order */
	while (lidx--) {
		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
			tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
			tegra241_vcmdq_unmap_lvcmdq(vintf->lvcmdqs[lidx]);
		}
	}
	vintf_write_config(vintf, 0);
	/* Clear all SID match/replace pairs after the VINTF is disabled */
	for (sidx = 0; sidx < vintf->cmdqv->num_sids_per_vintf; sidx++) {
		writel(0, REG_VINTF(vintf, SID_MATCH(sidx)));
		writel(0, REG_VINTF(vintf, SID_REPLACE(sidx)));
	}
}
532 
533 /* Map a global VCMDQ to the pre-assigned LVCMDQ */
534 static void tegra241_vcmdq_map_lvcmdq(struct tegra241_vcmdq *vcmdq)
535 {
536 	u32 regval = readl(REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
537 	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
538 
539 	writel(regval | CMDQV_CMDQ_ALLOCATED,
540 	       REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
541 	dev_dbg(vcmdq->cmdqv->dev, "%smapped\n", h);
542 }
543 
/*
 * Reset and (re)enable a VINTF, then map and init all of its preallocated
 * LVCMDQs in ascending order as the HW requires. @hyp_own requests hypervisor
 * ownership; the effective value is read back from HW into vintf->hyp_own.
 * Returns 0 on success; on failure the VINTF is fully deinited again.
 */
static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
{
	u32 regval;
	u16 lidx;
	int ret;

	/* Reset VINTF */
	tegra241_vintf_hw_deinit(vintf);

	/* Configure and enable VINTF */
	/*
	 * Note that HYP_OWN bit is wired to zero when running in guest kernel,
	 * whether enabling it here or not, as !HYP_OWN cmdq HWs only support a
	 * restricted set of supported commands.
	 */
	regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own) |
		 FIELD_PREP(VINTF_VMID, vintf->vsmmu.vmid);
	writel(regval, REG_VINTF(vintf, CONFIG));

	ret = vintf_write_config(vintf, regval | VINTF_EN);
	if (ret)
		return ret;
	/*
	 * As being mentioned above, HYP_OWN bit is wired to zero for a guest
	 * kernel, so read it back from HW to ensure that reflects in hyp_own
	 */
	vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));

	/* HW requires to map LVCMDQs in ascending order */
	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
			tegra241_vcmdq_map_lvcmdq(vintf->lvcmdqs[lidx]);
			ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]);
			if (ret) {
				tegra241_vintf_hw_deinit(vintf);
				return ret;
			}
		}
	}

	return 0;
}
586 
/*
 * Impl op (device_reset): toggle the global CMDQV enable, statically assign
 * global VCMDQs to VINTFs as their LVCMDQs, and bring up the kernel-owned
 * VINTF0. Returns 0 on success or a negative errno.
 */
static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	u16 qidx, lidx, idx;
	u32 regval;
	int ret;

	/* Reset CMDQV */
	regval = readl_relaxed(REG_CMDQV(cmdqv, CONFIG));
	ret = cmdqv_write_config(cmdqv, regval & ~CMDQV_EN);
	if (ret)
		return ret;
	ret = cmdqv_write_config(cmdqv, regval | CMDQV_EN);
	if (ret)
		return ret;

	/* Assign preallocated global VCMDQs to each VINTF as LVCMDQs */
	for (idx = 0, qidx = 0; idx < cmdqv->num_vintfs; idx++) {
		for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
			/* Global VCMDQ qidx routes to (VINTF idx, LVCMDQ lidx) */
			regval  = FIELD_PREP(CMDQV_CMDQ_ALLOC_VINTF, idx);
			regval |= FIELD_PREP(CMDQV_CMDQ_ALLOC_LVCMDQ, lidx);
			writel_relaxed(regval,
				       REG_CMDQV(cmdqv, CMDQ_ALLOC(qidx++)));
		}
	}

	return tegra241_vintf_hw_init(cmdqv->vintfs[0], true);
}
616 
617 /* VCMDQ Resource Helpers */
618 
/*
 * Allocate and initialize the arm_smmu_cmdq backing a VCMDQ: queue memory is
 * allocated via the common SMMU helper (devres-managed), then q_base is
 * rewritten into the VCMDQ_BASE register layout (ADDR | LOG2SIZE).
 */
static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
	struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
	struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
	struct arm_smmu_queue *q = &cmdq->q;
	char name[16];
	u32 regval;
	int ret;

	snprintf(name, 16, "vcmdq%u", vcmdq->idx);

	/* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */
	regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	q->llq.max_n_shift =
		min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, regval));

	/* Use the common helper to init the VCMDQ, and then... */
	ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
				      TEGRA241_VCMDQ_PROD, TEGRA241_VCMDQ_CONS,
				      CMDQ_ENT_DWORDS, name);
	if (ret)
		return ret;

	/* ...override q_base to write VCMDQ_BASE registers */
	q->q_base = q->base_dma & VCMDQ_ADDR;
	q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);

	return arm_smmu_cmdq_init(smmu, cmdq);
}
648 
649 /* VINTF Logical VCMDQ Resource Helpers */
650 
/* Drop the VINTF's reference to an LVCMDQ; freeing is the caller's job */
static void tegra241_vintf_deinit_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	vintf->lvcmdqs[lidx] = NULL;
}
655 
656 static int tegra241_vintf_init_lvcmdq(struct tegra241_vintf *vintf, u16 lidx,
657 				      struct tegra241_vcmdq *vcmdq)
658 {
659 	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
660 	u16 idx = vintf->idx;
661 
662 	vcmdq->idx = idx * cmdqv->num_lvcmdqs_per_vintf + lidx;
663 	vcmdq->lidx = lidx;
664 	vcmdq->cmdqv = cmdqv;
665 	vcmdq->vintf = vintf;
666 	vcmdq->page0 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE0(idx, lidx);
667 	vcmdq->page1 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE1(idx, lidx);
668 
669 	vintf->lvcmdqs[lidx] = vcmdq;
670 	return 0;
671 }
672 
/*
 * Detach and (for kernel-owned VINTFs) free an LVCMDQ. Ownership split:
 * only a hyp_own VCMDQ was kzalloc'ed by this driver and is kfree'd here.
 */
static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
	char header[64];

	/* Note that the lvcmdq queue memory space is managed by devres */

	tegra241_vintf_deinit_lvcmdq(vintf, lidx);

	dev_dbg(vintf->cmdqv->dev,
		"%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64));
	/* Guest-owned VCMDQ is free-ed with hw_queue by iommufd core */
	if (vcmdq->vintf->hyp_own)
		kfree(vcmdq);
}
688 
689 static struct tegra241_vcmdq *
690 tegra241_vintf_alloc_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
691 {
692 	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
693 	struct tegra241_vcmdq *vcmdq;
694 	char header[64];
695 	int ret;
696 
697 	vcmdq = kzalloc_obj(*vcmdq);
698 	if (!vcmdq)
699 		return ERR_PTR(-ENOMEM);
700 
701 	ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
702 	if (ret)
703 		goto free_vcmdq;
704 
705 	/* Build an arm_smmu_cmdq for each LVCMDQ */
706 	ret = tegra241_vcmdq_alloc_smmu_cmdq(vcmdq);
707 	if (ret)
708 		goto deinit_lvcmdq;
709 
710 	dev_dbg(cmdqv->dev,
711 		"%sallocated\n", lvcmdq_error_header(vcmdq, header, 64));
712 	return vcmdq;
713 
714 deinit_lvcmdq:
715 	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
716 free_vcmdq:
717 	kfree(vcmdq);
718 	return ERR_PTR(ret);
719 }
720 
721 /* VINTF Resource Helpers */
722 
723 static void tegra241_cmdqv_deinit_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
724 {
725 	kfree(cmdqv->vintfs[idx]->lvcmdqs);
726 	ida_free(&cmdqv->vintf_ids, idx);
727 	cmdqv->vintfs[idx] = NULL;
728 }
729 
730 static int tegra241_cmdqv_init_vintf(struct tegra241_cmdqv *cmdqv, u16 max_idx,
731 				     struct tegra241_vintf *vintf)
732 {
733 
734 	u16 idx;
735 	int ret;
736 
737 	ret = ida_alloc_max(&cmdqv->vintf_ids, max_idx, GFP_KERNEL);
738 	if (ret < 0)
739 		return ret;
740 	idx = ret;
741 
742 	vintf->idx = idx;
743 	vintf->cmdqv = cmdqv;
744 	vintf->base = cmdqv->base + TEGRA241_VINTF(idx);
745 
746 	vintf->lvcmdqs = kzalloc_objs(*vintf->lvcmdqs,
747 				      cmdqv->num_lvcmdqs_per_vintf);
748 	if (!vintf->lvcmdqs) {
749 		ida_free(&cmdqv->vintf_ids, idx);
750 		return -ENOMEM;
751 	}
752 
753 	cmdqv->vintfs[idx] = vintf;
754 	return ret;
755 }
756 
757 /* Remove Helpers */
758 
/*
 * Fully tear down one VINTF: HW deinit first, then LVCMDQ resources, then the
 * VINTF bookkeeping. The VINTF struct itself is kfree'd only when it is
 * kernel-owned; a guest-owned VINTF is free-ed with its viommu by iommufd.
 */
static void tegra241_cmdqv_remove_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
{
	struct tegra241_vintf *vintf = cmdqv->vintfs[idx];
	u16 lidx;

	tegra241_vintf_hw_deinit(vintf);

	/* Remove LVCMDQ resources */
	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
		if (vintf->lvcmdqs[lidx])
			tegra241_vintf_free_lvcmdq(vintf, lidx);

	dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx);
	tegra241_cmdqv_deinit_vintf(cmdqv, idx);
	if (!vintf->hyp_own) {
		mutex_destroy(&vintf->lvcmdq_mutex);
		ida_destroy(&vintf->sids);
		/* Guest-owned VINTF is free-ed with viommu by iommufd core */
	} else {
		kfree(vintf);
	}
}
781 
/*
 * Impl op (device_remove): release every remaining VINTF (only VINTF0 is
 * expected at this point) and then the CMDQV-level resources. The cmdqv
 * struct itself is devres-managed (devm_krealloc'ed over the smmu struct).
 */
static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	u16 idx;

	/* Remove VINTF resources */
	for (idx = 0; idx < cmdqv->num_vintfs; idx++) {
		if (cmdqv->vintfs[idx]) {
			/* Only vintf0 should remain at this stage */
			WARN_ON(idx > 0);
			tegra241_cmdqv_remove_vintf(cmdqv, idx);
		}
	}

	/* Remove cmdqv resources */
	ida_destroy(&cmdqv->vintf_ids);

	if (cmdqv->irq > 0)
		free_irq(cmdqv->irq, cmdqv);
	iounmap(cmdqv->base);
	kfree(cmdqv->vintfs);
	put_device(cmdqv->dev); /* smmu->impl_dev */
}
806 
807 static int
808 tegra241_cmdqv_init_vintf_user(struct arm_vsmmu *vsmmu,
809 			       const struct iommu_user_data *user_data);
810 
811 static void *tegra241_cmdqv_hw_info(struct arm_smmu_device *smmu, u32 *length,
812 				    enum iommu_hw_info_type *type)
813 {
814 	struct tegra241_cmdqv *cmdqv =
815 		container_of(smmu, struct tegra241_cmdqv, smmu);
816 	struct iommu_hw_info_tegra241_cmdqv *info;
817 	u32 regval;
818 
819 	if (*type != IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV)
820 		return ERR_PTR(-EOPNOTSUPP);
821 
822 	info = kzalloc_obj(*info);
823 	if (!info)
824 		return ERR_PTR(-ENOMEM);
825 
826 	regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
827 	info->log2vcmdqs = ilog2(cmdqv->num_lvcmdqs_per_vintf);
828 	info->log2vsids = ilog2(cmdqv->num_sids_per_vintf);
829 	info->version = FIELD_GET(CMDQV_VER, regval);
830 
831 	*length = sizeof(*info);
832 	*type = IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV;
833 	return info;
834 }
835 
836 static size_t tegra241_cmdqv_get_vintf_size(enum iommu_viommu_type viommu_type)
837 {
838 	if (viommu_type != IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV)
839 		return 0;
840 	return VIOMMU_STRUCT_SIZE(struct tegra241_vintf, vsmmu.core);
841 }
842 
/* Full impl ops, installed once tegra241_cmdqv_init_structures() succeeds */
static struct arm_smmu_impl_ops tegra241_cmdqv_impl_ops = {
	/* For in-kernel use */
	.get_secondary_cmdq = tegra241_cmdqv_get_cmdq,
	.device_reset = tegra241_cmdqv_hw_reset,
	.device_remove = tegra241_cmdqv_remove,
	/* For user-space use */
	.hw_info = tegra241_cmdqv_hw_info,
	.get_viommu_size = tegra241_cmdqv_get_vintf_size,
	/* Forward-declared above; defined in the user-space section below */
	.vsmmu_init = tegra241_cmdqv_init_vintf_user,
};
853 
854 /* Probe Functions */
855 
/*
 * Impl op (init_structures): allocate VINTF0 and preallocate all of its
 * LVCMDQs for in-kernel use, then expose the full impl ops.
 *
 * NOTE(review): on LVCMDQ allocation failure this returns without unwinding;
 * cleanup appears to rely on the caller invoking the device_remove op
 * (tegra241_cmdqv_remove, wired via init_ops in __tegra241_cmdqv_probe) --
 * confirm against the arm-smmu-v3 probe error path.
 */
static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf;
	int lidx;
	int ret;

	vintf = kzalloc_obj(*vintf);
	if (!vintf)
		return -ENOMEM;

	/* Init VINTF0 for in-kernel use */
	ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
	if (ret) {
		dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
		return ret;
	}

	/* Preallocate logical VCMDQs to VINTF0 */
	for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
		struct tegra241_vcmdq *vcmdq;

		vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
		if (IS_ERR(vcmdq))
			return PTR_ERR(vcmdq);
	}

	/* Now, we are ready to run all the impl ops */
	smmu->impl_ops = &tegra241_cmdqv_impl_ops;
	return 0;
}
888 
889 #ifdef CONFIG_IOMMU_DEBUGFS
890 static struct dentry *cmdqv_debugfs_dir;
891 #endif
892 
/*
 * Map the CMDQV MMIO region, grow the smmu allocation into a tegra241_cmdqv
 * (devm_krealloc keeps the same devres lifetime), request the error IRQ and
 * read the HW parameters. Returns the embedded smmu on success, or NULL so
 * that the caller falls back to the standard SMMU CMDQ.
 */
static struct arm_smmu_device *
__tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
		       int irq)
{
	static const struct arm_smmu_impl_ops init_ops = {
		.init_structures = tegra241_cmdqv_init_structures,
		.device_remove = tegra241_cmdqv_remove,
	};
	struct tegra241_cmdqv *cmdqv = NULL;
	struct arm_smmu_device *new_smmu;
	void __iomem *base;
	u32 regval;
	int ret;

	/* container_of(smmu, ...) conversions rely on smmu being first */
	static_assert(offsetof(struct tegra241_cmdqv, smmu) == 0);

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(smmu->dev, "failed to ioremap\n");
		return NULL;
	}

	regval = readl(base + TEGRA241_CMDQV_CONFIG);
	if (disable_cmdqv) {
		dev_info(smmu->dev, "Detected disable_cmdqv=true\n");
		writel(regval & ~CMDQV_EN, base + TEGRA241_CMDQV_CONFIG);
		goto iounmap;
	}

	/* Grow the smmu allocation in place; smmu stays the first member */
	cmdqv = devm_krealloc(smmu->dev, smmu, sizeof(*cmdqv), GFP_KERNEL);
	if (!cmdqv)
		goto iounmap;
	new_smmu = &cmdqv->smmu;

	cmdqv->irq = irq;
	cmdqv->base = base;
	cmdqv->dev = smmu->impl_dev;
	cmdqv->base_phys = res->start;

	if (cmdqv->irq > 0) {
		ret = request_threaded_irq(irq, NULL, tegra241_cmdqv_isr,
					   IRQF_ONESHOT, "tegra241-cmdqv",
					   cmdqv);
		if (ret) {
			dev_err(cmdqv->dev, "failed to request irq (%d): %d\n",
				cmdqv->irq, ret);
			goto iounmap;
		}
	}

	/* Decode the HW geometry from the PARAM register */
	regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
	cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval);
	cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval);
	cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs;
	cmdqv->num_sids_per_vintf =
		1 << FIELD_GET(CMDQV_NUM_SID_PER_VM_LOG2, regval);

	cmdqv->vintfs =
		kzalloc_objs(*cmdqv->vintfs, cmdqv->num_vintfs);
	if (!cmdqv->vintfs)
		goto free_irq;

	ida_init(&cmdqv->vintf_ids);

#ifdef CONFIG_IOMMU_DEBUGFS
	if (!cmdqv_debugfs_dir) {
		cmdqv_debugfs_dir =
			debugfs_create_dir("tegra241_cmdqv", iommu_debugfs_dir);
		debugfs_create_bool("bypass_vcmdq", 0644, cmdqv_debugfs_dir,
				    &bypass_vcmdq);
	}
#endif

	/* Provide init-level ops only, until tegra241_cmdqv_init_structures */
	new_smmu->impl_ops = &init_ops;

	return new_smmu;

free_irq:
	if (cmdqv->irq > 0)
		free_irq(cmdqv->irq, cmdqv);
iounmap:
	iounmap(base);
	return NULL;
}
978 
979 struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
980 {
981 	struct platform_device *pdev = to_platform_device(smmu->impl_dev);
982 	struct arm_smmu_device *new_smmu;
983 	struct resource *res;
984 	int irq;
985 
986 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
987 	if (!res) {
988 		dev_err(&pdev->dev, "no memory resource found for CMDQV\n");
989 		goto out_fallback;
990 	}
991 
992 	irq = platform_get_irq_optional(pdev, 0);
993 	if (irq <= 0)
994 		dev_warn(&pdev->dev,
995 			 "no interrupt. errors will not be reported\n");
996 
997 	new_smmu = __tegra241_cmdqv_probe(smmu, res, irq);
998 	if (new_smmu)
999 		return new_smmu;
1000 
1001 out_fallback:
1002 	dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
1003 	smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
1004 	put_device(smmu->impl_dev);
1005 	return ERR_PTR(-ENODEV);
1006 }
1007 
1008 /* User space VINTF and VCMDQ Functions */
1009 
1010 static size_t tegra241_vintf_get_vcmdq_size(struct iommufd_viommu *viommu,
1011 					    enum iommu_hw_queue_type queue_type)
1012 {
1013 	if (queue_type != IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV)
1014 		return 0;
1015 	return HW_QUEUE_STRUCT_SIZE(struct tegra241_vcmdq, core);
1016 }
1017 
/*
 * Program a user-owned VCMDQ's BASE register with the guest-provided queue
 * base address and log2size (both packed in cmdq.q.q_base). The queue is
 * reset first and left disabled — enabling is done by user space, per the
 * inline comment below.
 *
 * Returns 0 (kept int-returning to match the other hw_init paths).
 */
static int tegra241_vcmdq_hw_init_user(struct tegra241_vcmdq *vcmdq)
{
	char header[64];

	/* Reset VCMDQ */
	tegra241_vcmdq_hw_deinit(vcmdq);

	/* Configure the vcmdq only; User space does the enabling */
	writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));

	/* q_base holds PA in VCMDQ_ADDR bits and log2size in the low bits */
	dev_dbg(vcmdq->cmdqv->dev, "%sinited at host PA 0x%llx size 0x%lx\n",
		lvcmdq_error_header(vcmdq, header, 64),
		vcmdq->cmdq.q.q_base & VCMDQ_ADDR,
		1UL << (vcmdq->cmdq.q.q_base & VCMDQ_LOG2SIZE));
	return 0;
}
1034 
/*
 * iommufd hw_queue destroy callback: tear down a user-allocated LVCMDQ.
 *
 * Mirrors the allocation path in reverse under the LVCMDQ mutex: reset the
 * HW queue, unmap the global VCMDQ from the VINTF slot, then free the
 * logical index. The dependency taken on the previous LVCMDQ (if any) is
 * dropped last, so iommufd can enforce descending-order destruction.
 */
static void
tegra241_vintf_destroy_lvcmdq_user(struct iommufd_hw_queue *hw_queue)
{
	struct tegra241_vcmdq *vcmdq = hw_queue_to_vcmdq(hw_queue);

	mutex_lock(&vcmdq->vintf->lvcmdq_mutex);
	tegra241_vcmdq_hw_deinit(vcmdq);
	tegra241_vcmdq_unmap_lvcmdq(vcmdq);
	tegra241_vintf_free_lvcmdq(vcmdq->vintf, vcmdq->lidx);
	if (vcmdq->prev)
		iommufd_hw_queue_undepend(vcmdq, vcmdq->prev, core);
	mutex_unlock(&vcmdq->vintf->lvcmdq_mutex);
}
1048 
/*
 * iommufd hw_queue init callback: back the guest LVCMDQ at @lidx with a
 * global VCMDQ, using guest queue memory at physical address @base_addr_pa.
 *
 * Constraints enforced here:
 *  - hw_queue->length must be a power of two within the SMMU's CMDQS limit;
 *  - base_addr_pa must fit VCMDQ_ADDR and be aligned to hw_queue->length;
 *  - LVCMDQs must be created in ascending @lidx order (HW mapping rule),
 *    and the matching descending destroy order is enforced by registering
 *    a dependency on the previous LVCMDQ with iommufd.
 *
 * Returns 0 on success or a negative errno; on failure all partial state
 * is unwound under the same lock.
 */
static int tegra241_vintf_alloc_lvcmdq_user(struct iommufd_hw_queue *hw_queue,
					    u32 lidx, phys_addr_t base_addr_pa)
{
	struct tegra241_vintf *vintf = viommu_to_vintf(hw_queue->viommu);
	struct tegra241_vcmdq *vcmdq = hw_queue_to_vcmdq(hw_queue);
	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
	struct arm_smmu_device *smmu = &cmdqv->smmu;
	struct tegra241_vcmdq *prev = NULL;
	u32 log2size, max_n_shift;
	char header[64];
	int ret;

	if (hw_queue->type != IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV)
		return -EOPNOTSUPP;
	if (lidx >= cmdqv->num_lvcmdqs_per_vintf)
		return -EINVAL;

	mutex_lock(&vintf->lvcmdq_mutex);

	/* Reject a second allocation for an already-populated slot */
	if (vintf->lvcmdqs[lidx]) {
		ret = -EEXIST;
		goto unlock;
	}

	/*
	 * HW requires to map LVCMDQs in ascending order, so reject if the
	 * previous lvcmdqs is not allocated yet.
	 */
	if (lidx) {
		prev = vintf->lvcmdqs[lidx - 1];
		if (!prev) {
			ret = -EIO;
			goto unlock;
		}
	}

	/*
	 * hw_queue->length must be a power of 2, in range of
	 *   [ 32, 2 ^ (idr[1].CMDQS + CMDQ_ENT_SZ_SHIFT) ]
	 */
	max_n_shift = FIELD_GET(IDR1_CMDQS,
				readl_relaxed(smmu->base + ARM_SMMU_IDR1));
	if (!is_power_of_2(hw_queue->length) || hw_queue->length < 32 ||
	    hw_queue->length > (1 << (max_n_shift + CMDQ_ENT_SZ_SHIFT))) {
		ret = -EINVAL;
		goto unlock;
	}
	/* Number of queue entries, log2, as the HW BASE register expects */
	log2size = ilog2(hw_queue->length) - CMDQ_ENT_SZ_SHIFT;

	/* base_addr_pa must be aligned to hw_queue->length */
	if (base_addr_pa & ~VCMDQ_ADDR ||
	    base_addr_pa & (hw_queue->length - 1)) {
		ret = -EINVAL;
		goto unlock;
	}

	/*
	 * HW requires to unmap LVCMDQs in descending order, so destroy() must
	 * follow this rule. Set a dependency on its previous LVCMDQ so iommufd
	 * core will help enforce it.
	 */
	if (prev) {
		ret = iommufd_hw_queue_depend(vcmdq, prev, core);
		if (ret)
			goto unlock;
	}
	vcmdq->prev = prev;

	ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
	if (ret)
		goto undepend_vcmdq;

	dev_dbg(cmdqv->dev, "%sallocated\n",
		lvcmdq_error_header(vcmdq, header, 64));

	tegra241_vcmdq_map_lvcmdq(vcmdq);

	/* Pack guest PA and log2size into q_base, the BASE register layout */
	vcmdq->cmdq.q.q_base = base_addr_pa & VCMDQ_ADDR;
	vcmdq->cmdq.q.q_base |= log2size;

	ret = tegra241_vcmdq_hw_init_user(vcmdq);
	if (ret)
		goto unmap_lvcmdq;

	/* Install the destroy callback only once fully initialized */
	hw_queue->destroy = &tegra241_vintf_destroy_lvcmdq_user;
	mutex_unlock(&vintf->lvcmdq_mutex);
	return 0;

unmap_lvcmdq:
	tegra241_vcmdq_unmap_lvcmdq(vcmdq);
	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
undepend_vcmdq:
	if (vcmdq->prev)
		iommufd_hw_queue_undepend(vcmdq, vcmdq->prev, core);
unlock:
	mutex_unlock(&vintf->lvcmdq_mutex);
	return ret;
}
1147 
/*
 * viommu destroy callback: undo tegra241_cmdqv_init_vintf_user().
 *
 * Releases the PAGE0 mmap window handed to user space (if one was created,
 * indicated by a nonzero mmap_offset), then removes the VINTF itself.
 */
static void tegra241_cmdqv_destroy_vintf_user(struct iommufd_viommu *viommu)
{
	struct tegra241_vintf *vintf = viommu_to_vintf(viommu);

	if (vintf->mmap_offset)
		iommufd_viommu_destroy_mmap(&vintf->vsmmu.core,
					    vintf->mmap_offset);
	tegra241_cmdqv_remove_vintf(vintf->cmdqv, vintf->idx);
}
1157 
/*
 * vdevice destroy callback: release the SID_MATCH/SID_REPLACE pair that
 * tegra241_vintf_init_vsid() programmed for this virtual device.
 *
 * SID_MATCH is cleared first (its bit 0 is the enable set on allocation),
 * so matching stops before the physical SID replacement value is wiped.
 */
static void tegra241_vintf_destroy_vsid(struct iommufd_vdevice *vdev)
{
	struct tegra241_vintf_sid *vsid = vdev_to_vsid(vdev);
	struct tegra241_vintf *vintf = vsid->vintf;

	writel(0, REG_VINTF(vintf, SID_MATCH(vsid->idx)));
	writel(0, REG_VINTF(vintf, SID_REPLACE(vsid->idx)));
	ida_free(&vintf->sids, vsid->idx);
	dev_dbg(vintf->cmdqv->dev,
		"VINTF%u: deallocated SID_REPLACE%d for pSID=%x\n", vintf->idx,
		vsid->idx, vsid->sid);
}
1170 
/*
 * vdevice init callback: map a guest stream ID (vSID = vdev->virt_id) to
 * the device's physical stream ID via a SID_MATCH/SID_REPLACE register pair
 * on this VINTF.
 *
 * Returns 0 on success, -EINVAL for an out-of-range vSID, or a negative
 * errno if no SID slot is available.
 */
static int tegra241_vintf_init_vsid(struct iommufd_vdevice *vdev)
{
	struct device *dev = iommufd_vdevice_to_device(vdev);
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct tegra241_vintf *vintf = viommu_to_vintf(vdev->viommu);
	struct tegra241_vintf_sid *vsid = vdev_to_vsid(vdev);
	struct arm_smmu_stream *stream = &master->streams[0];
	u64 virt_sid = vdev->virt_id;
	int sidx;

	/* vSID must fit the 32-bit SID_MATCH register */
	if (virt_sid > UINT_MAX)
		return -EINVAL;

	/* Only the first stream is mapped; multi-stream masters unexpected */
	WARN_ON_ONCE(master->num_streams != 1);

	/* Find an empty pair of SID_REPLACE and SID_MATCH */
	sidx = ida_alloc_max(&vintf->sids, vintf->cmdqv->num_sids_per_vintf - 1,
			     GFP_KERNEL);
	if (sidx < 0)
		return sidx;

	/* Program the physical SID before enabling the match (bit 0) */
	writel(stream->id, REG_VINTF(vintf, SID_REPLACE(sidx)));
	/*
	 * NOTE(review): writel truncates to 32 bits, so a vSID that uses all
	 * 32 bits would lose its top bit after the << 1. Confirm whether the
	 * bound above should be 31 bits rather than UINT_MAX.
	 */
	writel(virt_sid << 1 | 0x1, REG_VINTF(vintf, SID_MATCH(sidx)));
	dev_dbg(vintf->cmdqv->dev,
		"VINTF%u: allocated SID_REPLACE%d for pSID=%x, vSID=%x\n",
		vintf->idx, sidx, stream->id, (u32)virt_sid);

	vsid->idx = sidx;
	vsid->vintf = vintf;
	vsid->sid = stream->id;

	/* Register the teardown hook so the slot is reclaimed on destroy */
	vdev->destroy = &tegra241_vintf_destroy_vsid;
	return 0;
}
1205 
/*
 * viommu ops for a user-owned VINTF; installed on success by
 * tegra241_cmdqv_init_vintf_user().
 */
static struct iommufd_viommu_ops tegra241_cmdqv_viommu_ops = {
	.destroy = tegra241_cmdqv_destroy_vintf_user,
	.alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
	/* Non-accelerated commands will be still handled by the kernel */
	.cache_invalidate = arm_vsmmu_cache_invalidate,
	/* Each vdevice carries a SID_MATCH/SID_REPLACE slot (vintf_sid) */
	.vdevice_size = VDEVICE_STRUCT_SIZE(struct tegra241_vintf_sid, core),
	.vdevice_init = tegra241_vintf_init_vsid,
	.get_hw_queue_size = tegra241_vintf_get_vcmdq_size,
	.hw_queue_init_phys = tegra241_vintf_alloc_lvcmdq_user,
};
1216 
/*
 * viommu init callback: set up a user-owned VINTF for this vSMMU instance.
 *
 * Allocates a VINTF index (the kernel keeps VINTF0 for itself, per the
 * comment below), brings the HW VINTF up as user-owned with no LVCMDQs,
 * exposes its PAGE0 MMIO region to user space via an iommufd mmap window,
 * and reports the mmap offset/length back through the driver-specific
 * user data struct.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * initialized state is unwound via the goto chain at the bottom.
 */
static int
tegra241_cmdqv_init_vintf_user(struct arm_vsmmu *vsmmu,
			       const struct iommu_user_data *user_data)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(vsmmu->smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf = viommu_to_vintf(&vsmmu->core);
	struct iommu_viommu_tegra241_cmdqv data;
	phys_addr_t page0_base;
	int ret;

	/*
	 * Unsupported type should be rejected by tegra241_cmdqv_get_vintf_size.
	 * Seeing one here indicates a kernel bug or some data corruption.
	 */
	if (WARN_ON(vsmmu->core.type != IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV))
		return -EOPNOTSUPP;

	/* The mmap offset/length must be reported back; user_data required */
	if (!user_data)
		return -EINVAL;

	ret = iommu_copy_struct_from_user(&data, user_data,
					  IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV,
					  out_vintf_mmap_length);
	if (ret)
		return ret;

	/* Negative on failure; non-negative presumably the allocated index */
	ret = tegra241_cmdqv_init_vintf(cmdqv, cmdqv->num_vintfs - 1, vintf);
	if (ret < 0) {
		dev_err(cmdqv->dev, "no more available vintf\n");
		return ret;
	}

	/*
	 * Initialize the user-owned VINTF without a LVCMDQ, as it cannot pre-
	 * allocate a LVCMDQ until user space wants one, for security reasons.
	 * It is different than the kernel-owned VINTF0, which had pre-assigned
	 * and pre-allocated global VCMDQs that would be mapped to the LVCMDQs
	 * by the tegra241_vintf_hw_init() call.
	 */
	ret = tegra241_vintf_hw_init(vintf, false);
	if (ret)
		goto deinit_vintf;

	/* Expose this VINTF's PAGE0 (64K MMIO) to user space via mmap */
	page0_base = cmdqv->base_phys + TEGRA241_VINTFi_PAGE0(vintf->idx);
	ret = iommufd_viommu_alloc_mmap(&vintf->vsmmu.core, page0_base, SZ_64K,
					&vintf->mmap_offset);
	if (ret)
		goto hw_deinit_vintf;

	data.out_vintf_mmap_length = SZ_64K;
	data.out_vintf_mmap_offset = vintf->mmap_offset;
	ret = iommu_copy_struct_to_user(user_data, &data,
					IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV,
					out_vintf_mmap_length);
	if (ret)
		goto free_mmap;

	/* SID slot allocator and LVCMDQ lock for the vdevice/hw_queue paths */
	ida_init(&vintf->sids);
	mutex_init(&vintf->lvcmdq_mutex);

	dev_dbg(cmdqv->dev, "VINTF%u: allocated with vmid (%d)\n", vintf->idx,
		vintf->vsmmu.vmid);

	vsmmu->core.ops = &tegra241_cmdqv_viommu_ops;
	return 0;

free_mmap:
	iommufd_viommu_destroy_mmap(&vintf->vsmmu.core, vintf->mmap_offset);
hw_deinit_vintf:
	tegra241_vintf_hw_deinit(vintf);
deinit_vintf:
	tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
	return ret;
}
1292 
1293 MODULE_IMPORT_NS("IOMMUFD");
1294