// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021-2024 NVIDIA CORPORATION & AFFILIATES. */

#define dev_fmt(fmt) "tegra241_cmdqv: " fmt

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/iopoll.h>
#include <uapi/linux/iommufd.h>

#include <acpi/acpixf.h>

#include "arm-smmu-v3.h"

/* CMDQV register page base and size defines */
#define TEGRA241_CMDQV_CONFIG_BASE	(0)
#define TEGRA241_CMDQV_CONFIG_SIZE	(SZ_64K)
#define TEGRA241_VCMDQ_PAGE0_BASE	(TEGRA241_CMDQV_CONFIG_BASE + SZ_64K)
#define TEGRA241_VCMDQ_PAGE1_BASE	(TEGRA241_VCMDQ_PAGE0_BASE + SZ_64K)
#define TEGRA241_VINTF_PAGE_BASE	(TEGRA241_VCMDQ_PAGE1_BASE + SZ_64K)

/* CMDQV global base regs */
#define TEGRA241_CMDQV_CONFIG		0x0000
#define  CMDQV_EN			BIT(0)

#define TEGRA241_CMDQV_PARAM		0x0004
#define  CMDQV_NUM_SID_PER_VM_LOG2	GENMASK(15, 12)
#define  CMDQV_NUM_VINTF_LOG2		GENMASK(11, 8)
#define  CMDQV_NUM_VCMDQ_LOG2		GENMASK(7, 4)
#define  CMDQV_VER			GENMASK(3, 0)

#define TEGRA241_CMDQV_STATUS		0x0008
#define  CMDQV_ENABLED			BIT(0)

#define TEGRA241_CMDQV_VINTF_ERR_MAP	0x0014
#define TEGRA241_CMDQV_VINTF_INT_MASK	0x001C
#define TEGRA241_CMDQV_CMDQ_ERR_MAP(m)  (0x0024 + 0x4*(m))

#define TEGRA241_CMDQV_CMDQ_ALLOC(q)	(0x0200 + 0x4*(q))
#define  CMDQV_CMDQ_ALLOC_VINTF		GENMASK(20, 15)
#define  CMDQV_CMDQ_ALLOC_LVCMDQ	GENMASK(7, 1)
#define  CMDQV_CMDQ_ALLOCATED		BIT(0)

/* VINTF base regs */
#define TEGRA241_VINTF(v)		(0x1000 + 0x100*(v))

#define TEGRA241_VINTF_CONFIG		0x0000
#define  VINTF_HYP_OWN			BIT(17)
#define  VINTF_VMID			GENMASK(16, 1)
#define  VINTF_EN			BIT(0)

#define TEGRA241_VINTF_STATUS		0x0004
#define  VINTF_STATUS			GENMASK(3, 1)
#define  VINTF_ENABLED			BIT(0)

#define TEGRA241_VINTF_SID_MATCH(s)	(0x0040 + 0x4*(s))
#define TEGRA241_VINTF_SID_REPLACE(s)	(0x0080 + 0x4*(s))

#define TEGRA241_VINTF_LVCMDQ_ERR_MAP_64(m) \
					(0x00C0 + 0x8*(m))
#define  LVCMDQ_ERR_MAP_NUM_64		2

/* VCMDQ base regs */
/* -- PAGE0 -- */
#define TEGRA241_VCMDQ_PAGE0(q)		(TEGRA241_VCMDQ_PAGE0_BASE + 0x80*(q))

#define TEGRA241_VCMDQ_CONS		0x00000
#define  VCMDQ_CONS_ERR			GENMASK(30, 24)

#define TEGRA241_VCMDQ_PROD		0x00004

#define TEGRA241_VCMDQ_CONFIG		0x00008
#define  VCMDQ_EN			BIT(0)

#define TEGRA241_VCMDQ_STATUS		0x0000C
#define  VCMDQ_ENABLED			BIT(0)

#define TEGRA241_VCMDQ_GERROR		0x00010
#define TEGRA241_VCMDQ_GERRORN		0x00014

/* -- PAGE1 -- */
#define TEGRA241_VCMDQ_PAGE1(q)		(TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
#define  VCMDQ_ADDR			GENMASK(47, 5)
#define  VCMDQ_LOG2SIZE			GENMASK(4, 0)

#define TEGRA241_VCMDQ_BASE		0x00000
#define TEGRA241_VCMDQ_CONS_INDX_BASE	0x00008

/* VINTF logical-VCMDQ pages */
#define TEGRA241_VINTFi_PAGE0(i)	(TEGRA241_VINTF_PAGE_BASE + SZ_128K*(i))
#define TEGRA241_VINTFi_PAGE1(i)	(TEGRA241_VINTFi_PAGE0(i) + SZ_64K)
#define TEGRA241_VINTFi_LVCMDQ_PAGE0(i, q) \
					(TEGRA241_VINTFi_PAGE0(i) + 0x80*(q))
#define TEGRA241_VINTFi_LVCMDQ_PAGE1(i, q) \
					(TEGRA241_VINTFi_PAGE1(i) + 0x80*(q))
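
/*
 * Putting the offsets above together, the MMIO space is laid out as:
 *   0x00000: CMDQV global CONFIG/STATUS/PARAM and CMDQ_ALLOC registers
 *   0x10000: PAGE0 of all global VCMDQs (PROD/CONS/CONFIG/GERROR), 0x80 apart
 *   0x20000: PAGE1 of all global VCMDQs (BASE/CONS_INDX_BASE), 0x80 apart
 *   0x30000: per-VINTF pages, SZ_128K per VINTF i, containing only the
 *            LVCMDQ views (PAGE0 then PAGE1) that VINTF i may access
 */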

/* MMIO helpers */
#define REG_CMDQV(_cmdqv, _regname) \
	((_cmdqv)->base + TEGRA241_CMDQV_##_regname)
#define REG_VINTF(_vintf, _regname) \
	((_vintf)->base + TEGRA241_VINTF_##_regname)
#define REG_VCMDQ_PAGE0(_vcmdq, _regname) \
	((_vcmdq)->page0 + TEGRA241_VCMDQ_##_regname)
#define REG_VCMDQ_PAGE1(_vcmdq, _regname) \
	((_vcmdq)->page1 + TEGRA241_VCMDQ_##_regname)

static bool disable_cmdqv;
module_param(disable_cmdqv, bool, 0444);
MODULE_PARM_DESC(disable_cmdqv,
	"Disable the CMDQV HW and use the default SMMU internal CMDQ instead.");

static bool bypass_vcmdq;
module_param(bypass_vcmdq, bool, 0444);
MODULE_PARM_DESC(bypass_vcmdq,
	"Bypass the VCMDQ for debugging or performance comparison.");

/**
 * struct tegra241_vcmdq - Virtual Command Queue
 * @core: Embedded iommufd_hw_queue structure
 * @idx: Global index in the CMDQV
 * @lidx: Local index in the VINTF
 * @enabled: Enable status
 * @cmdqv: Parent CMDQV pointer
 * @vintf: Parent VINTF pointer
 * @prev: Previous LVCMDQ to depend on
 * @cmdq: Command Queue struct
 * @page0: MMIO Page0 base address
 * @page1: MMIO Page1 base address
 */
struct tegra241_vcmdq {
	struct iommufd_hw_queue core;

	u16 idx;
	u16 lidx;

	bool enabled;

	struct tegra241_cmdqv *cmdqv;
	struct tegra241_vintf *vintf;
	struct tegra241_vcmdq *prev;
	struct arm_smmu_cmdq cmdq;

	void __iomem *page0;
	void __iomem *page1;
};
#define hw_queue_to_vcmdq(v) container_of(v, struct tegra241_vcmdq, core)

/**
 * struct tegra241_vintf - Virtual Interface
 * @vsmmu: Embedded arm_vsmmu structure
 * @idx: Global index in the CMDQV
 * @enabled: Enable status
 * @hyp_own: Owned by hypervisor (in-kernel)
 * @cmdqv: Parent CMDQV pointer
 * @lvcmdqs: List of logical VCMDQ pointers
 * @lvcmdq_mutex: Lock to serialize user-allocated lvcmdqs
 * @base: MMIO base address
 * @mmap_offset: Offset argument for mmap() syscall
 * @sids: Stream ID mapping resources
 */
struct tegra241_vintf {
	struct arm_vsmmu vsmmu;

	u16 idx;

	bool enabled;
	bool hyp_own;

	struct tegra241_cmdqv *cmdqv;
	struct tegra241_vcmdq **lvcmdqs;
	struct mutex lvcmdq_mutex; /* user space race */

	void __iomem *base;
	unsigned long mmap_offset;

	struct ida sids;
};
#define viommu_to_vintf(v) container_of(v, struct tegra241_vintf, vsmmu.core)

/**
 * struct tegra241_vintf_sid - Virtual Interface Stream ID Mapping
 * @core: Embedded iommufd_vdevice structure, holding virtual Stream ID
 * @vintf: Parent VINTF pointer
 * @sid: Physical Stream ID
 * @idx: Mapping index in the VINTF
 */
struct tegra241_vintf_sid {
	struct iommufd_vdevice core;
	struct tegra241_vintf *vintf;
	u32 sid;
	u8 idx;
};
#define vdev_to_vsid(v) container_of(v, struct tegra241_vintf_sid, core)

/**
 * struct tegra241_cmdqv - CMDQ-V for SMMUv3
 * @smmu: SMMUv3 device
 * @dev: CMDQV device
 * @base: MMIO base address
 * @base_phys: MMIO physical base address, for mmap
 * @irq: IRQ number
 * @num_vintfs: Total number of VINTFs
 * @num_vcmdqs: Total number of VCMDQs
 * @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF
 * @num_sids_per_vintf: Total number of SID mappings per VINTF
 * @vintf_ids: VINTF id allocator
 * @vintfs: List of VINTFs
 */
struct tegra241_cmdqv {
	struct arm_smmu_device smmu;
	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	int irq;

	/* CMDQV Hardware Params */
	u16 num_vintfs;
	u16 num_vcmdqs;
	u16 num_lvcmdqs_per_vintf;
	u16 num_sids_per_vintf;

	struct ida vintf_ids;

	struct tegra241_vintf **vintfs;
};

/* Config and Polling Helpers */

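/*
 * Write @regval to a CONFIG register and poll the corresponding STATUS
 * register until its bit 0 reflects the requested enable/disable state,
 * or until ARM_SMMU_POLL_TIMEOUT_US expires.
 */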
static inline int tegra241_cmdqv_write_config(struct tegra241_cmdqv *cmdqv,
					      void __iomem *addr_config,
					      void __iomem *addr_status,
					      u32 regval, const char *header,
					      bool *out_enabled)
{
	bool en = regval & BIT(0);
	int ret;

	writel(regval, addr_config);
	ret = readl_poll_timeout(addr_status, regval,
				 en ? regval & BIT(0) : !(regval & BIT(0)),
				 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		dev_err(cmdqv->dev, "%sfailed to %sable, STATUS=0x%08X\n",
			header, en ? "en" : "dis", regval);
	if (out_enabled)
		WRITE_ONCE(*out_enabled, regval & BIT(0));
	return ret;
}

static inline int cmdqv_write_config(struct tegra241_cmdqv *cmdqv, u32 regval)
{
	return tegra241_cmdqv_write_config(cmdqv,
					   REG_CMDQV(cmdqv, CONFIG),
					   REG_CMDQV(cmdqv, STATUS),
					   regval, "CMDQV: ", NULL);
}

static inline int vintf_write_config(struct tegra241_vintf *vintf, u32 regval)
{
	char header[16];

	snprintf(header, 16, "VINTF%u: ", vintf->idx);
	return tegra241_cmdqv_write_config(vintf->cmdqv,
					   REG_VINTF(vintf, CONFIG),
					   REG_VINTF(vintf, STATUS),
					   regval, header, &vintf->enabled);
}

static inline char *lvcmdq_error_header(struct tegra241_vcmdq *vcmdq,
					char *header, int hlen)
{
	WARN_ON(hlen < 64);
	if (WARN_ON(!vcmdq->vintf))
		return "";
	snprintf(header, hlen, "VINTF%u: VCMDQ%u/LVCMDQ%u: ",
		 vcmdq->vintf->idx, vcmdq->idx, vcmdq->lidx);
	return header;
}

static inline int vcmdq_write_config(struct tegra241_vcmdq *vcmdq, u32 regval)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);

	return tegra241_cmdqv_write_config(vcmdq->cmdqv,
					   REG_VCMDQ_PAGE0(vcmdq, CONFIG),
					   REG_VCMDQ_PAGE0(vcmdq, STATUS),
					   regval, h, &vcmdq->enabled);
}

/* ISR Functions */

static void tegra241_vintf_user_handle_error(struct tegra241_vintf *vintf)
{
	struct iommufd_viommu *viommu = &vintf->vsmmu.core;
	struct iommu_vevent_tegra241_cmdqv vevent_data;
	int i;

	for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
		u64 err = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));

		vevent_data.lvcmdq_err_map[i] = cpu_to_le64(err);
	}

	iommufd_viommu_report_event(viommu, IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV,
				    &vevent_data, sizeof(vevent_data));
}

static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf)
{
	int i;

	for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
		u64 map = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));

		while (map) {
			unsigned long lidx = __ffs64(map);
			struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
			u32 gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));

			__arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq);
			writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
			map &= ~BIT_ULL(lidx);
		}
	}
}

static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
{
	struct tegra241_cmdqv *cmdqv = (struct tegra241_cmdqv *)devid;
	void __iomem *reg_vintf_map = REG_CMDQV(cmdqv, VINTF_ERR_MAP);
	char err_str[256];
	u64 vintf_map;

	/* Use readl_relaxed() as register addresses are not 64-bit aligned */
	vintf_map = (u64)readl_relaxed(reg_vintf_map + 0x4) << 32 |
		    (u64)readl_relaxed(reg_vintf_map);

	snprintf(err_str, sizeof(err_str),
		 "vintf_map: %016llx, vcmdq_map %08x:%08x:%08x:%08x", vintf_map,
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(3))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(2))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(1))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(0))));

	dev_warn(cmdqv->dev, "unexpected error reported. %s\n", err_str);

	/* Handle VINTF0 and its LVCMDQs */
	if (vintf_map & BIT_ULL(0)) {
		tegra241_vintf0_handle_error(cmdqv->vintfs[0]);
		vintf_map &= ~BIT_ULL(0);
	}

	/* Handle other user VINTFs and their LVCMDQs */
	while (vintf_map) {
		unsigned long idx = __ffs64(vintf_map);

		tegra241_vintf_user_handle_error(cmdqv->vintfs[idx]);
		vintf_map &= ~BIT_ULL(idx);
	}

	return IRQ_HANDLED;
}

/* Command Queue Function */

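/*
 * A guest-owned (!HYP_OWN) VCMDQ HW accepts only the invalidation commands
 * listed below. When this kernel runs as a guest with such a VCMDQ, any
 * other command falls back to the standard SMMU CMDQ via the supports_cmd
 * check in tegra241_cmdqv_get_cmdq().
 */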
static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
{
	switch (ent->opcode) {
	case CMDQ_OP_TLBI_NH_ASID:
	case CMDQ_OP_TLBI_NH_VA:
	case CMDQ_OP_ATC_INV:
		return true;
	default:
		return false;
	}
}

static struct arm_smmu_cmdq *
tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
			struct arm_smmu_cmdq_ent *ent)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf = cmdqv->vintfs[0];
	struct tegra241_vcmdq *vcmdq;
	u16 lidx;

	if (READ_ONCE(bypass_vcmdq))
		return NULL;

	/* Use SMMU CMDQ if VINTF0 is uninitialized */
	if (!READ_ONCE(vintf->enabled))
		return NULL;

	/*
	 * Select an LVCMDQ to use. Here we use a temporary solution to
	 * balance out traffic on cmdq issuing: each cmdq has its own
	 * lock, and if all CPUs issued command lists through the same
	 * cmdq, only one CPU at a time could enter the issuing process,
	 * while the others would be spinning on the same lock.
	 */
	lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
	vcmdq = vintf->lvcmdqs[lidx];
	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
		return NULL;

	/* Unsupported CMD goes for smmu->cmdq pathway */
	if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
		return NULL;
	return &vcmdq->cmdq;
}

/* HW Reset Functions */

/*
 * When a guest-owned VCMDQ is disabled, if the guest did not enqueue a
 * CMD_SYNC following an ATC_INV command that timed out at the end of the
 * guest queue, the TIMEOUT will not be reported until this VCMDQ gets
 * assigned to the next VM, where it would surface as a false alarm and
 * potentially cause unwanted behavior in the new VM. Thus, a guest-owned
 * VCMDQ must flush the TIMEOUT when it gets disabled. This can be done by
 * simply issuing a CMD_SYNC to the SMMU CMDQ.
 */
static void tegra241_vcmdq_hw_flush_timeout(struct tegra241_vcmdq *vcmdq)
{
	struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
	u64 cmd_sync[CMDQ_ENT_DWORDS] = {};

	cmd_sync[0] = FIELD_PREP(CMDQ_0_OP, CMDQ_OP_CMD_SYNC) |
		      FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_NONE);

	/*
	 * It does not hurt to insert another CMD_SYNC, taking advantage of the
	 * arm_smmu_cmdq_issue_cmdlist() that waits for the CMD_SYNC completion.
	 */
	arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, cmd_sync, 1, true);
}

/* This function is for LVCMDQ, so @vcmdq must not be unmapped yet */
static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
	u32 gerrorn, gerror;

	if (vcmdq_write_config(vcmdq, 0)) {
		dev_err(vcmdq->cmdqv->dev,
			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
	}
	tegra241_vcmdq_hw_flush_timeout(vcmdq);

	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, PROD));
	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, CONS));
	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, BASE));
	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, CONS_INDX_BASE));

	gerrorn = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN));
	gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
	if (gerror != gerrorn) {
		dev_warn(vcmdq->cmdqv->dev,
			 "%suncleared error detected, resetting\n", h);
		writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
	}

	dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h);
}

/* This function is for LVCMDQ, so @vcmdq must be mapped prior */
static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
	int ret;

	/* Reset VCMDQ */
	tegra241_vcmdq_hw_deinit(vcmdq);

	/* Configure and enable VCMDQ */
	writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));

	ret = vcmdq_write_config(vcmdq, VCMDQ_EN);
	if (ret) {
		dev_err(vcmdq->cmdqv->dev,
			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
		return ret;
	}

	dev_dbg(vcmdq->cmdqv->dev, "%sinited\n", h);
	return 0;
}

/* Unmap a global VCMDQ from the pre-assigned LVCMDQ */
static void tegra241_vcmdq_unmap_lvcmdq(struct tegra241_vcmdq *vcmdq)
{
	u32 regval = readl(REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);

	writel(regval & ~CMDQV_CMDQ_ALLOCATED,
	       REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
	dev_dbg(vcmdq->cmdqv->dev, "%sunmapped\n", h);
}

static void tegra241_vintf_hw_deinit(struct tegra241_vintf *vintf)
{
	u16 lidx = vintf->cmdqv->num_lvcmdqs_per_vintf;
	int sidx;

	/* HW requires LVCMDQs to be unmapped in descending order */
	while (lidx--) {
		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
			tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
			tegra241_vcmdq_unmap_lvcmdq(vintf->lvcmdqs[lidx]);
		}
	}
	vintf_write_config(vintf, 0);
	for (sidx = 0; sidx < vintf->cmdqv->num_sids_per_vintf; sidx++) {
		writel(0, REG_VINTF(vintf, SID_MATCH(sidx)));
		writel(0, REG_VINTF(vintf, SID_REPLACE(sidx)));
	}
}

/* Map a global VCMDQ to the pre-assigned LVCMDQ */
static void tegra241_vcmdq_map_lvcmdq(struct tegra241_vcmdq *vcmdq)
{
	u32 regval = readl(REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);

	writel(regval | CMDQV_CMDQ_ALLOCATED,
	       REG_CMDQV(vcmdq->cmdqv, CMDQ_ALLOC(vcmdq->idx)));
	dev_dbg(vcmdq->cmdqv->dev, "%smapped\n", h);
}

static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
{
	u32 regval;
	u16 lidx;
	int ret;

	/* Reset VINTF */
	tegra241_vintf_hw_deinit(vintf);

	/* Configure and enable VINTF */
	/*
	 * Note that the HYP_OWN bit is wired to zero when running in a guest
	 * kernel, whether it is set here or not, as a !HYP_OWN cmdq HW only
	 * supports a restricted set of commands.
	 */
	regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own) |
		 FIELD_PREP(VINTF_VMID, vintf->vsmmu.vmid);
	writel(regval, REG_VINTF(vintf, CONFIG));

	ret = vintf_write_config(vintf, regval | VINTF_EN);
	if (ret)
		return ret;
	/*
	 * As mentioned above, the HYP_OWN bit is wired to zero for a guest
	 * kernel, so read it back from HW to make sure hyp_own reflects it.
	 */
	vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));

	/* HW requires LVCMDQs to be mapped in ascending order */
	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
			tegra241_vcmdq_map_lvcmdq(vintf->lvcmdqs[lidx]);
			ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]);
			if (ret) {
				tegra241_vintf_hw_deinit(vintf);
				return ret;
			}
		}
	}

	return 0;
}

static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	u16 qidx, lidx, idx;
	u32 regval;
	int ret;

	/* Reset CMDQV */
	regval = readl_relaxed(REG_CMDQV(cmdqv, CONFIG));
	ret = cmdqv_write_config(cmdqv, regval & ~CMDQV_EN);
	if (ret)
		return ret;
	ret = cmdqv_write_config(cmdqv, regval | CMDQV_EN);
	if (ret)
		return ret;

	/* Assign preallocated global VCMDQs to each VINTF as LVCMDQs */
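	/*
	 * Illustrative example: with 2 LVCMDQs per VINTF, the loop below
	 * programs global VCMDQ5 (qidx = 2*2 + 1) as VINTF2's LVCMDQ1.
	 */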
	for (idx = 0, qidx = 0; idx < cmdqv->num_vintfs; idx++) {
		for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
			regval  = FIELD_PREP(CMDQV_CMDQ_ALLOC_VINTF, idx);
			regval |= FIELD_PREP(CMDQV_CMDQ_ALLOC_LVCMDQ, lidx);
			writel_relaxed(regval,
				       REG_CMDQV(cmdqv, CMDQ_ALLOC(qidx++)));
		}
	}

	return tegra241_vintf_hw_init(cmdqv->vintfs[0], true);
}

/* VCMDQ Resource Helpers */

static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
	struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
	struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
	struct arm_smmu_queue *q = &cmdq->q;
	char name[16];
	u32 regval;
	int ret;

	snprintf(name, 16, "vcmdq%u", vcmdq->idx);

	/* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */
	regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	q->llq.max_n_shift =
		min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, regval));

	/* Use the common helper to init the VCMDQ, and then... */
	ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
				      TEGRA241_VCMDQ_PROD, TEGRA241_VCMDQ_CONS,
				      CMDQ_ENT_DWORDS, name);
	if (ret)
		return ret;

	/* ...override q_base to write VCMDQ_BASE registers */
	q->q_base = q->base_dma & VCMDQ_ADDR;
	q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);
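	/*
	 * E.g. (illustrative values) a 64KB ring holds 4096 16-byte entries
	 * (max_n_shift = 12), so a ring at PA 0x80000000 is encoded as
	 * q_base = 0x80000000 | 12.
	 */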

	if (!vcmdq->vintf->hyp_own)
		cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;

	return arm_smmu_cmdq_init(smmu, cmdq);
}

/* VINTF Logical VCMDQ Resource Helpers */

static void tegra241_vintf_deinit_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	vintf->lvcmdqs[lidx] = NULL;
}

static int tegra241_vintf_init_lvcmdq(struct tegra241_vintf *vintf, u16 lidx,
				      struct tegra241_vcmdq *vcmdq)
{
	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
	u16 idx = vintf->idx;

	vcmdq->idx = idx * cmdqv->num_lvcmdqs_per_vintf + lidx;
	vcmdq->lidx = lidx;
	vcmdq->cmdqv = cmdqv;
	vcmdq->vintf = vintf;
	vcmdq->page0 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE0(idx, lidx);
	vcmdq->page1 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE1(idx, lidx);

	vintf->lvcmdqs[lidx] = vcmdq;
	return 0;
}

static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
	char header[64];

	/* Note that the lvcmdq queue memory space is managed by devres */

	tegra241_vintf_deinit_lvcmdq(vintf, lidx);

	dev_dbg(vintf->cmdqv->dev,
		"%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64));
	/* A guest-owned VCMDQ is freed along with its hw_queue by the iommufd core */
	if (vcmdq->vintf->hyp_own)
		kfree(vcmdq);
}

static struct tegra241_vcmdq *
tegra241_vintf_alloc_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
	struct tegra241_vcmdq *vcmdq;
	char header[64];
	int ret;

	vcmdq = kzalloc(sizeof(*vcmdq), GFP_KERNEL);
	if (!vcmdq)
		return ERR_PTR(-ENOMEM);

	ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
	if (ret)
		goto free_vcmdq;

	/* Build an arm_smmu_cmdq for each LVCMDQ */
	ret = tegra241_vcmdq_alloc_smmu_cmdq(vcmdq);
	if (ret)
		goto deinit_lvcmdq;

	dev_dbg(cmdqv->dev,
		"%sallocated\n", lvcmdq_error_header(vcmdq, header, 64));
	return vcmdq;

deinit_lvcmdq:
	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
free_vcmdq:
	kfree(vcmdq);
	return ERR_PTR(ret);
}

/* VINTF Resource Helpers */

static void tegra241_cmdqv_deinit_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
{
	kfree(cmdqv->vintfs[idx]->lvcmdqs);
	ida_free(&cmdqv->vintf_ids, idx);
	cmdqv->vintfs[idx] = NULL;
}

static int tegra241_cmdqv_init_vintf(struct tegra241_cmdqv *cmdqv, u16 max_idx,
				     struct tegra241_vintf *vintf)
{
	u16 idx;
	int ret;

	ret = ida_alloc_max(&cmdqv->vintf_ids, max_idx, GFP_KERNEL);
	if (ret < 0)
		return ret;
	idx = ret;

	vintf->idx = idx;
	vintf->cmdqv = cmdqv;
	vintf->base = cmdqv->base + TEGRA241_VINTF(idx);

	vintf->lvcmdqs = kcalloc(cmdqv->num_lvcmdqs_per_vintf,
				 sizeof(*vintf->lvcmdqs), GFP_KERNEL);
	if (!vintf->lvcmdqs) {
		ida_free(&cmdqv->vintf_ids, idx);
		return -ENOMEM;
	}

	cmdqv->vintfs[idx] = vintf;
	return ret;
}

/* Remove Helpers */

static void tegra241_cmdqv_remove_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
{
	struct tegra241_vintf *vintf = cmdqv->vintfs[idx];
	u16 lidx;

	tegra241_vintf_hw_deinit(vintf);

	/* Remove LVCMDQ resources */
	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
		if (vintf->lvcmdqs[lidx])
			tegra241_vintf_free_lvcmdq(vintf, lidx);

	dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx);
	tegra241_cmdqv_deinit_vintf(cmdqv, idx);
	if (!vintf->hyp_own) {
		mutex_destroy(&vintf->lvcmdq_mutex);
		ida_destroy(&vintf->sids);
		/* A guest-owned VINTF is freed along with its viommu by the iommufd core */
	} else {
		kfree(vintf);
	}
}

static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	u16 idx;

	/* Remove VINTF resources */
	for (idx = 0; idx < cmdqv->num_vintfs; idx++) {
		if (cmdqv->vintfs[idx]) {
			/* Only vintf0 should remain at this stage */
			WARN_ON(idx > 0);
			tegra241_cmdqv_remove_vintf(cmdqv, idx);
		}
	}

	/* Remove cmdqv resources */
	ida_destroy(&cmdqv->vintf_ids);

	if (cmdqv->irq > 0)
		free_irq(cmdqv->irq, cmdqv);
	iounmap(cmdqv->base);
	kfree(cmdqv->vintfs);
	put_device(cmdqv->dev); /* smmu->impl_dev */
}

static int
tegra241_cmdqv_init_vintf_user(struct arm_vsmmu *vsmmu,
			       const struct iommu_user_data *user_data);

static void *tegra241_cmdqv_hw_info(struct arm_smmu_device *smmu, u32 *length,
				    enum iommu_hw_info_type *type)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	struct iommu_hw_info_tegra241_cmdqv *info;
	u32 regval;

	if (*type != IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV)
		return ERR_PTR(-EOPNOTSUPP);

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
	info->log2vcmdqs = ilog2(cmdqv->num_lvcmdqs_per_vintf);
	info->log2vsids = ilog2(cmdqv->num_sids_per_vintf);
	info->version = FIELD_GET(CMDQV_VER, regval);

	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV;
	return info;
}

static size_t tegra241_cmdqv_get_vintf_size(enum iommu_viommu_type viommu_type)
{
	if (viommu_type != IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV)
		return 0;
	return VIOMMU_STRUCT_SIZE(struct tegra241_vintf, vsmmu.core);
}

static struct arm_smmu_impl_ops tegra241_cmdqv_impl_ops = {
	/* For in-kernel use */
	.get_secondary_cmdq = tegra241_cmdqv_get_cmdq,
	.device_reset = tegra241_cmdqv_hw_reset,
	.device_remove = tegra241_cmdqv_remove,
	/* For user-space use */
	.hw_info = tegra241_cmdqv_hw_info,
	.get_viommu_size = tegra241_cmdqv_get_vintf_size,
	.vsmmu_init = tegra241_cmdqv_init_vintf_user,
};

/* Probe Functions */

static int tegra241_cmdqv_acpi_is_memory(struct acpi_resource *res, void *data)
{
	struct resource_win win;

	return !acpi_dev_resource_address_space(res, &win);
}

static int tegra241_cmdqv_acpi_get_irqs(struct acpi_resource *ares, void *data)
{
	struct resource r;
	int *irq = data;

	if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r))
		*irq = r.start;
	return 1; /* No need to add resource to the list */
}

static struct resource *
tegra241_cmdqv_find_acpi_resource(struct device *dev, int *irq)
{
	struct acpi_device *adev = to_acpi_device(dev);
	struct list_head resource_list;
	struct resource_entry *rentry;
	struct resource *res = NULL;
	int ret;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     tegra241_cmdqv_acpi_is_memory, NULL);
	if (ret < 0) {
		dev_err(dev, "failed to get memory resource: %d\n", ret);
		return NULL;
	}

	rentry = list_first_entry_or_null(&resource_list,
					  struct resource_entry, node);
	if (!rentry) {
		dev_err(dev, "failed to get memory resource entry\n");
		goto free_list;
	}

	/* Caller must free the res */
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		goto free_list;

	*res = *rentry->res;

	acpi_dev_free_resource_list(&resource_list);

	INIT_LIST_HEAD(&resource_list);

	if (irq)
		ret = acpi_dev_get_resources(adev, &resource_list,
					     tegra241_cmdqv_acpi_get_irqs, irq);
	if (ret < 0 || !irq || *irq <= 0)
		dev_warn(dev, "no interrupt. errors will not be reported\n");

free_list:
	acpi_dev_free_resource_list(&resource_list);
	return res;
}

static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf;
	int lidx;
	int ret;

	vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
	if (!vintf)
		return -ENOMEM;

	/* Init VINTF0 for in-kernel use */
	ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
	if (ret) {
		dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
		return ret;
	}

	/* Preallocate logical VCMDQs to VINTF0 */
	for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
		struct tegra241_vcmdq *vcmdq;

		vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
		if (IS_ERR(vcmdq))
			return PTR_ERR(vcmdq);
	}

	/* Now, we are ready to run all the impl ops */
	smmu->impl_ops = &tegra241_cmdqv_impl_ops;
	return 0;
}

#ifdef CONFIG_IOMMU_DEBUGFS
static struct dentry *cmdqv_debugfs_dir;
#endif

static struct arm_smmu_device *
__tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
		       int irq)
{
	static const struct arm_smmu_impl_ops init_ops = {
		.init_structures = tegra241_cmdqv_init_structures,
		.device_remove = tegra241_cmdqv_remove,
	};
	struct tegra241_cmdqv *cmdqv = NULL;
	struct arm_smmu_device *new_smmu;
	void __iomem *base;
	u32 regval;
	int ret;

	static_assert(offsetof(struct tegra241_cmdqv, smmu) == 0);

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(smmu->dev, "failed to ioremap\n");
		return NULL;
	}

	regval = readl(base + TEGRA241_CMDQV_CONFIG);
	if (disable_cmdqv) {
		dev_info(smmu->dev, "Detected disable_cmdqv=true\n");
		writel(regval & ~CMDQV_EN, base + TEGRA241_CMDQV_CONFIG);
		goto iounmap;
	}

	cmdqv = devm_krealloc(smmu->dev, smmu, sizeof(*cmdqv), GFP_KERNEL);
	if (!cmdqv)
		goto iounmap;
	new_smmu = &cmdqv->smmu;

	cmdqv->irq = irq;
	cmdqv->base = base;
	cmdqv->dev = smmu->impl_dev;
	cmdqv->base_phys = res->start;

	if (cmdqv->irq > 0) {
		ret = request_threaded_irq(irq, NULL, tegra241_cmdqv_isr,
					   IRQF_ONESHOT, "tegra241-cmdqv",
					   cmdqv);
		if (ret) {
			dev_err(cmdqv->dev, "failed to request irq (%d): %d\n",
				cmdqv->irq, ret);
			goto iounmap;
		}
	}

	regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
	cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval);
	cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval);
	cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs;
	cmdqv->num_sids_per_vintf =
		1 << FIELD_GET(CMDQV_NUM_SID_PER_VM_LOG2, regval);
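	/*
	 * Worked example (illustrative values): a PARAM register reporting
	 * NUM_VINTF_LOG2 = 6 and NUM_VCMDQ_LOG2 = 7 yields 64 VINTFs and
	 * 128 VCMDQs, i.e. 2 LVCMDQs per VINTF.
	 */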

	cmdqv->vintfs =
		kcalloc(cmdqv->num_vintfs, sizeof(*cmdqv->vintfs), GFP_KERNEL);
	if (!cmdqv->vintfs)
		goto free_irq;

	ida_init(&cmdqv->vintf_ids);

#ifdef CONFIG_IOMMU_DEBUGFS
	if (!cmdqv_debugfs_dir) {
		cmdqv_debugfs_dir =
			debugfs_create_dir("tegra241_cmdqv", iommu_debugfs_dir);
		debugfs_create_bool("bypass_vcmdq", 0644, cmdqv_debugfs_dir,
				    &bypass_vcmdq);
	}
#endif

	/* Provide init-level ops only, until tegra241_cmdqv_init_structures */
	new_smmu->impl_ops = &init_ops;

	return new_smmu;

free_irq:
	if (cmdqv->irq > 0)
		free_irq(cmdqv->irq, cmdqv);
iounmap:
	iounmap(base);
	return NULL;
}

struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
{
	struct arm_smmu_device *new_smmu;
	struct resource *res = NULL;
	int irq = 0;	/* must be initialized: the ACPI helper reads *irq */

	if (!smmu->dev->of_node)
		res = tegra241_cmdqv_find_acpi_resource(smmu->impl_dev, &irq);
	if (!res)
		goto out_fallback;

	new_smmu = __tegra241_cmdqv_probe(smmu, res, irq);
	kfree(res);

	if (new_smmu)
		return new_smmu;

out_fallback:
	dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
	smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
	put_device(smmu->impl_dev);
	return ERR_PTR(-ENODEV);
}

/* User space VINTF and VCMDQ Functions */

static size_t tegra241_vintf_get_vcmdq_size(struct iommufd_viommu *viommu,
					    enum iommu_hw_queue_type queue_type)
{
	if (queue_type != IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV)
		return 0;
	return HW_QUEUE_STRUCT_SIZE(struct tegra241_vcmdq, core);
}

static int tegra241_vcmdq_hw_init_user(struct tegra241_vcmdq *vcmdq)
{
	char header[64];

	/* Configure the vcmdq only; user space does the enabling */
	writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));

	dev_dbg(vcmdq->cmdqv->dev, "%sinited at host PA 0x%llx size 0x%lx\n",
		lvcmdq_error_header(vcmdq, header, 64),
		vcmdq->cmdq.q.q_base & VCMDQ_ADDR,
		1UL << (vcmdq->cmdq.q.q_base & VCMDQ_LOG2SIZE));
	return 0;
}

static void
tegra241_vintf_destroy_lvcmdq_user(struct iommufd_hw_queue *hw_queue)
{
	struct tegra241_vcmdq *vcmdq = hw_queue_to_vcmdq(hw_queue);

	mutex_lock(&vcmdq->vintf->lvcmdq_mutex);
	tegra241_vcmdq_hw_deinit(vcmdq);
	tegra241_vcmdq_unmap_lvcmdq(vcmdq);
	tegra241_vintf_free_lvcmdq(vcmdq->vintf, vcmdq->lidx);
	if (vcmdq->prev)
		iommufd_hw_queue_undepend(vcmdq, vcmdq->prev, core);
	mutex_unlock(&vcmdq->vintf->lvcmdq_mutex);
}

static int tegra241_vintf_alloc_lvcmdq_user(struct iommufd_hw_queue *hw_queue,
					    u32 lidx, phys_addr_t base_addr_pa)
{
	struct tegra241_vintf *vintf = viommu_to_vintf(hw_queue->viommu);
	struct tegra241_vcmdq *vcmdq = hw_queue_to_vcmdq(hw_queue);
	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
	struct arm_smmu_device *smmu = &cmdqv->smmu;
	struct tegra241_vcmdq *prev = NULL;
	u32 log2size, max_n_shift;
	char header[64];
	int ret;

	if (hw_queue->type != IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV)
		return -EOPNOTSUPP;
	if (lidx >= cmdqv->num_lvcmdqs_per_vintf)
		return -EINVAL;

	mutex_lock(&vintf->lvcmdq_mutex);

	if (vintf->lvcmdqs[lidx]) {
		ret = -EEXIST;
		goto unlock;
	}

	/*
	 * HW requires LVCMDQs to be mapped in ascending order, so reject the
	 * allocation if the previous LVCMDQ is not allocated yet.
	 */
	if (lidx) {
		prev = vintf->lvcmdqs[lidx - 1];
		if (!prev) {
			ret = -EIO;
			goto unlock;
		}
	}

	/*
	 * hw_queue->length must be a power of 2, in range of
	 *   [ 32, 2 ^ (idr[1].CMDQS + CMDQ_ENT_SZ_SHIFT) ]
	 */
	max_n_shift = FIELD_GET(IDR1_CMDQS,
				readl_relaxed(smmu->base + ARM_SMMU_IDR1));
	if (!is_power_of_2(hw_queue->length) || hw_queue->length < 32 ||
	    hw_queue->length > (1 << (max_n_shift + CMDQ_ENT_SZ_SHIFT))) {
		ret = -EINVAL;
		goto unlock;
	}
	log2size = ilog2(hw_queue->length) - CMDQ_ENT_SZ_SHIFT;
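	/*
	 * E.g. (illustrative) a 4KB hw_queue->length holds 256 16-byte
	 * entries, so log2size = ilog2(4096) - 4 = 8.
	 */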

	/* base_addr_pa must be aligned to hw_queue->length */
	if (base_addr_pa & ~VCMDQ_ADDR ||
	    base_addr_pa & (hw_queue->length - 1)) {
		ret = -EINVAL;
		goto unlock;
	}

	/*
	 * HW requires LVCMDQs to be unmapped in descending order, so destroy()
	 * must follow this rule. Set a dependency on its previous LVCMDQ so
	 * the iommufd core will help enforce it.
	 */
	if (prev) {
		ret = iommufd_hw_queue_depend(vcmdq, prev, core);
		if (ret)
			goto unlock;
	}
	vcmdq->prev = prev;

	ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
	if (ret)
		goto undepend_vcmdq;

	dev_dbg(cmdqv->dev, "%sallocated\n",
		lvcmdq_error_header(vcmdq, header, 64));

	tegra241_vcmdq_map_lvcmdq(vcmdq);

	vcmdq->cmdq.q.q_base = base_addr_pa & VCMDQ_ADDR;
	vcmdq->cmdq.q.q_base |= log2size;

	ret = tegra241_vcmdq_hw_init_user(vcmdq);
	if (ret)
		goto unmap_lvcmdq;

	hw_queue->destroy = &tegra241_vintf_destroy_lvcmdq_user;
	mutex_unlock(&vintf->lvcmdq_mutex);
	return 0;

unmap_lvcmdq:
	tegra241_vcmdq_unmap_lvcmdq(vcmdq);
	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
undepend_vcmdq:
	if (vcmdq->prev)
		iommufd_hw_queue_undepend(vcmdq, vcmdq->prev, core);
unlock:
	mutex_unlock(&vintf->lvcmdq_mutex);
	return ret;
}

static void tegra241_cmdqv_destroy_vintf_user(struct iommufd_viommu *viommu)
{
	struct tegra241_vintf *vintf = viommu_to_vintf(viommu);

	if (vintf->mmap_offset)
		iommufd_viommu_destroy_mmap(&vintf->vsmmu.core,
					    vintf->mmap_offset);
	tegra241_cmdqv_remove_vintf(vintf->cmdqv, vintf->idx);
}

static void tegra241_vintf_destroy_vsid(struct iommufd_vdevice *vdev)
{
	struct tegra241_vintf_sid *vsid = vdev_to_vsid(vdev);
	struct tegra241_vintf *vintf = vsid->vintf;

	writel(0, REG_VINTF(vintf, SID_MATCH(vsid->idx)));
	writel(0, REG_VINTF(vintf, SID_REPLACE(vsid->idx)));
	ida_free(&vintf->sids, vsid->idx);
	dev_dbg(vintf->cmdqv->dev,
		"VINTF%u: deallocated SID_REPLACE%d for pSID=%x\n", vintf->idx,
		vsid->idx, vsid->sid);
}

static int tegra241_vintf_init_vsid(struct iommufd_vdevice *vdev)
{
	struct device *dev = iommufd_vdevice_to_device(vdev);
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct tegra241_vintf *vintf = viommu_to_vintf(vdev->viommu);
	struct tegra241_vintf_sid *vsid = vdev_to_vsid(vdev);
	struct arm_smmu_stream *stream = &master->streams[0];
	u64 virt_sid = vdev->virt_id;
	int sidx;

	if (virt_sid > UINT_MAX)
		return -EINVAL;

	WARN_ON_ONCE(master->num_streams != 1);

	/* Find an empty pair of SID_REPLACE and SID_MATCH */
	sidx = ida_alloc_max(&vintf->sids, vintf->cmdqv->num_sids_per_vintf - 1,
			     GFP_KERNEL);
	if (sidx < 0)
		return sidx;

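	/*
	 * SID_MATCH bit 0 is the enable/valid bit; bits 31:1 carry the virtual
	 * SID to match. When a guest command hits a match, HW substitutes the
	 * physical SID programmed in the paired SID_REPLACE register.
	 */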
	writel(stream->id, REG_VINTF(vintf, SID_REPLACE(sidx)));
	writel(virt_sid << 1 | 0x1, REG_VINTF(vintf, SID_MATCH(sidx)));
	dev_dbg(vintf->cmdqv->dev,
		"VINTF%u: allocated SID_REPLACE%d for pSID=%x, vSID=%x\n",
		vintf->idx, sidx, stream->id, (u32)virt_sid);

	vsid->idx = sidx;
	vsid->vintf = vintf;
	vsid->sid = stream->id;

	vdev->destroy = &tegra241_vintf_destroy_vsid;
	return 0;
}

static struct iommufd_viommu_ops tegra241_cmdqv_viommu_ops = {
	.destroy = tegra241_cmdqv_destroy_vintf_user,
	.alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
	/* Non-accelerated commands will still be handled by the kernel */
	.cache_invalidate = arm_vsmmu_cache_invalidate,
	.vdevice_size = VDEVICE_STRUCT_SIZE(struct tegra241_vintf_sid, core),
	.vdevice_init = tegra241_vintf_init_vsid,
	.get_hw_queue_size = tegra241_vintf_get_vcmdq_size,
	.hw_queue_init_phys = tegra241_vintf_alloc_lvcmdq_user,
};

static int
tegra241_cmdqv_init_vintf_user(struct arm_vsmmu *vsmmu,
			       const struct iommu_user_data *user_data)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(vsmmu->smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf = viommu_to_vintf(&vsmmu->core);
	struct iommu_viommu_tegra241_cmdqv data;
	phys_addr_t page0_base;
	int ret;

	/*
	 * An unsupported type should be rejected by
	 * tegra241_cmdqv_get_vintf_size. Seeing one here indicates a kernel
	 * bug or some data corruption.
	 */
	if (WARN_ON(vsmmu->core.type != IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV))
		return -EOPNOTSUPP;

	if (!user_data)
		return -EINVAL;

	ret = iommu_copy_struct_from_user(&data, user_data,
					  IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV,
					  out_vintf_mmap_length);
	if (ret)
		return ret;

	ret = tegra241_cmdqv_init_vintf(cmdqv, cmdqv->num_vintfs - 1, vintf);
	if (ret < 0) {
		dev_err(cmdqv->dev, "no more available vintf\n");
		return ret;
	}

	/*
	 * Initialize the user-owned VINTF without an LVCMDQ, as it must not
	 * pre-allocate an LVCMDQ until user space asks for one, for security
	 * reasons. This differs from the kernel-owned VINTF0, whose global
	 * VCMDQs were pre-assigned and pre-allocated, to be mapped to its
	 * LVCMDQs by the tegra241_vintf_hw_init() call.
	 */
	ret = tegra241_vintf_hw_init(vintf, false);
	if (ret)
		goto deinit_vintf;

	page0_base = cmdqv->base_phys + TEGRA241_VINTFi_PAGE0(vintf->idx);
	ret = iommufd_viommu_alloc_mmap(&vintf->vsmmu.core, page0_base, SZ_64K,
					&vintf->mmap_offset);
	if (ret)
		goto hw_deinit_vintf;

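	/*
	 * Hand the mmap window back to user space. A VMM would typically map
	 * it through the iommufd char device, e.g. (illustrative):
	 *   mmap(NULL, out_vintf_mmap_length, PROT_READ | PROT_WRITE,
	 *        MAP_SHARED, iommufd, out_vintf_mmap_offset);
	 */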
	data.out_vintf_mmap_length = SZ_64K;
	data.out_vintf_mmap_offset = vintf->mmap_offset;
	ret = iommu_copy_struct_to_user(user_data, &data,
					IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV,
					out_vintf_mmap_length);
	if (ret)
		goto free_mmap;

	ida_init(&vintf->sids);
	mutex_init(&vintf->lvcmdq_mutex);

	dev_dbg(cmdqv->dev, "VINTF%u: allocated with vmid (%d)\n", vintf->idx,
		vintf->vsmmu.vmid);

	vsmmu->core.ops = &tegra241_cmdqv_viommu_ops;
	return 0;

free_mmap:
	iommufd_viommu_destroy_mmap(&vintf->vsmmu.core, vintf->mmap_offset);
hw_deinit_vintf:
	tegra241_vintf_hw_deinit(vintf);
deinit_vintf:
	tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
	return ret;
}

MODULE_IMPORT_NS("IOMMUFD");