// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021-2024 NVIDIA CORPORATION & AFFILIATES. */

#define dev_fmt(fmt) "tegra241_cmdqv: " fmt

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>

#include <acpi/acpixf.h>

#include "arm-smmu-v3.h"

/* CMDQV register page base and size defines */
#define TEGRA241_CMDQV_CONFIG_BASE	(0)
#define TEGRA241_CMDQV_CONFIG_SIZE	(SZ_64K)
#define TEGRA241_VCMDQ_PAGE0_BASE	(TEGRA241_CMDQV_CONFIG_BASE + SZ_64K)
#define TEGRA241_VCMDQ_PAGE1_BASE	(TEGRA241_VCMDQ_PAGE0_BASE + SZ_64K)
#define TEGRA241_VINTF_PAGE_BASE	(TEGRA241_VCMDQ_PAGE1_BASE + SZ_64K)

/* CMDQV global base regs */
#define TEGRA241_CMDQV_CONFIG		0x0000
#define  CMDQV_EN			BIT(0)

#define TEGRA241_CMDQV_PARAM		0x0004
#define  CMDQV_NUM_VINTF_LOG2		GENMASK(11, 8)
#define  CMDQV_NUM_VCMDQ_LOG2		GENMASK(7, 4)

#define TEGRA241_CMDQV_STATUS		0x0008
#define  CMDQV_ENABLED			BIT(0)

#define TEGRA241_CMDQV_VINTF_ERR_MAP	0x0014
#define TEGRA241_CMDQV_VINTF_INT_MASK	0x001C
#define TEGRA241_CMDQV_CMDQ_ERR_MAP(m)	(0x0024 + 0x4*(m))

#define TEGRA241_CMDQV_CMDQ_ALLOC(q)	(0x0200 + 0x4*(q))
#define  CMDQV_CMDQ_ALLOC_VINTF		GENMASK(20, 15)
#define  CMDQV_CMDQ_ALLOC_LVCMDQ	GENMASK(7, 1)
#define  CMDQV_CMDQ_ALLOCATED		BIT(0)

/* VINTF base regs */
#define TEGRA241_VINTF(v)		(0x1000 + 0x100*(v))

#define TEGRA241_VINTF_CONFIG		0x0000
#define  VINTF_HYP_OWN			BIT(17)
#define  VINTF_VMID			GENMASK(16, 1)
#define  VINTF_EN			BIT(0)

#define TEGRA241_VINTF_STATUS		0x0004
#define  VINTF_STATUS			GENMASK(3, 1)
#define  VINTF_ENABLED			BIT(0)

#define TEGRA241_VINTF_LVCMDQ_ERR_MAP_64(m) \
					(0x00C0 + 0x8*(m))
#define  LVCMDQ_ERR_MAP_NUM_64		2

/* VCMDQ base regs */
/* -- PAGE0 -- */
#define TEGRA241_VCMDQ_PAGE0(q)		(TEGRA241_VCMDQ_PAGE0_BASE + 0x80*(q))

#define TEGRA241_VCMDQ_CONS		0x00000
#define  VCMDQ_CONS_ERR			GENMASK(30, 24)

#define TEGRA241_VCMDQ_PROD		0x00004

#define TEGRA241_VCMDQ_CONFIG		0x00008
#define  VCMDQ_EN			BIT(0)

#define TEGRA241_VCMDQ_STATUS		0x0000C
#define  VCMDQ_ENABLED			BIT(0)

#define TEGRA241_VCMDQ_GERROR		0x00010
#define TEGRA241_VCMDQ_GERRORN		0x00014

/* -- PAGE1 -- */
#define TEGRA241_VCMDQ_PAGE1(q)		(TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
#define  VCMDQ_ADDR			GENMASK(47, 5)
#define  VCMDQ_LOG2SIZE			GENMASK(4, 0)

#define TEGRA241_VCMDQ_BASE		0x00000
#define TEGRA241_VCMDQ_CONS_INDX_BASE	0x00008

/* VINTF logical-VCMDQ pages */
#define TEGRA241_VINTFi_PAGE0(i)	(TEGRA241_VINTF_PAGE_BASE + SZ_128K*(i))
#define TEGRA241_VINTFi_PAGE1(i)	(TEGRA241_VINTFi_PAGE0(i) + SZ_64K)
#define TEGRA241_VINTFi_LVCMDQ_PAGE0(i, q) \
					(TEGRA241_VINTFi_PAGE0(i) + 0x80*(q))
#define TEGRA241_VINTFi_LVCMDQ_PAGE1(i, q) \
					(TEGRA241_VINTFi_PAGE1(i) + 0x80*(q))
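
/*
 * Putting the defines above together, the MMIO space seen from the CMDQV
 * base is laid out as:
 *   0x00000, 64K: global CONFIG/STATUS/PARAM, error maps, CMDQ_ALLOC and
 *                 per-VINTF configuration registers
 *   0x10000, 64K: VCMDQ PAGE0 (PROD/CONS/CONFIG/STATUS/GERROR[N]), 0x80/queue
 *   0x20000, 64K: VCMDQ PAGE1 (BASE/CONS_INDX_BASE), 0x80 per queue
 *   0x30000 on  : one 128KB window per VINTF mirroring PAGE0/PAGE1 of its
 *                 logical VCMDQs
 */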

/* MMIO helpers */
#define REG_CMDQV(_cmdqv, _regname) \
	((_cmdqv)->base + TEGRA241_CMDQV_##_regname)
#define REG_VINTF(_vintf, _regname) \
	((_vintf)->base + TEGRA241_VINTF_##_regname)
#define REG_VCMDQ_PAGE0(_vcmdq, _regname) \
	((_vcmdq)->page0 + TEGRA241_VCMDQ_##_regname)
#define REG_VCMDQ_PAGE1(_vcmdq, _regname) \
	((_vcmdq)->page1 + TEGRA241_VCMDQ_##_regname)

static bool disable_cmdqv;
module_param(disable_cmdqv, bool, 0444);
MODULE_PARM_DESC(disable_cmdqv,
	"This allows disabling the CMDQV HW and using the default SMMU internal CMDQ.");

static bool bypass_vcmdq;
module_param(bypass_vcmdq, bool, 0444);
MODULE_PARM_DESC(bypass_vcmdq,
	"This allows bypassing the VCMDQ for debugging or performance comparison.");

/**
 * struct tegra241_vcmdq - Virtual Command Queue
 * @idx: Global index in the CMDQV
 * @lidx: Local index in the VINTF
 * @enabled: Enable status
 * @cmdqv: Parent CMDQV pointer
 * @vintf: Parent VINTF pointer
 * @cmdq: Command Queue struct
 * @page0: MMIO Page0 base address
 * @page1: MMIO Page1 base address
 */
struct tegra241_vcmdq {
	u16 idx;
	u16 lidx;

	bool enabled;

	struct tegra241_cmdqv *cmdqv;
	struct tegra241_vintf *vintf;
	struct arm_smmu_cmdq cmdq;

	void __iomem *page0;
	void __iomem *page1;
};

/**
 * struct tegra241_vintf - Virtual Interface
 * @idx: Global index in the CMDQV
 * @enabled: Enable status
 * @hyp_own: Owned by hypervisor (in-kernel)
 * @cmdqv: Parent CMDQV pointer
 * @lvcmdqs: List of logical VCMDQ pointers
 * @base: MMIO base address
 */
struct tegra241_vintf {
	u16 idx;

	bool enabled;
	bool hyp_own;

	struct tegra241_cmdqv *cmdqv;
	struct tegra241_vcmdq **lvcmdqs;

	void __iomem *base;
};

/**
 * struct tegra241_cmdqv - CMDQ-V for SMMUv3
 * @smmu: SMMUv3 device
 * @dev: CMDQV device
 * @base: MMIO base address
 * @irq: IRQ number
 * @num_vintfs: Total number of VINTFs
 * @num_vcmdqs: Total number of VCMDQs
 * @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF
 * @vintf_ids: VINTF id allocator
 * @vintfs: List of VINTFs
 */
struct tegra241_cmdqv {
	struct arm_smmu_device smmu;
	struct device *dev;

	void __iomem *base;
	int irq;

	/* CMDQV Hardware Params */
	u16 num_vintfs;
	u16 num_vcmdqs;
	u16 num_lvcmdqs_per_vintf;

	struct ida vintf_ids;

	struct tegra241_vintf **vintfs;
};

/* Config and Polling Helpers */

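/*
 * CMDQV, VINTF and VCMDQ all share the same enable handshake: bit 0 of the
 * CONFIG register requests the new state and bit 0 of the matching STATUS
 * register reflects it, so one helper can write CONFIG and then poll STATUS
 * for either an enable or a disable.
 */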
static inline int tegra241_cmdqv_write_config(struct tegra241_cmdqv *cmdqv,
					      void __iomem *addr_config,
					      void __iomem *addr_status,
					      u32 regval, const char *header,
					      bool *out_enabled)
{
	bool en = regval & BIT(0);
	int ret;

	writel(regval, addr_config);
	ret = readl_poll_timeout(addr_status, regval,
				 en ? regval & BIT(0) : !(regval & BIT(0)),
				 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		dev_err(cmdqv->dev, "%sfailed to %sable, STATUS=0x%08X\n",
			header, en ? "en" : "dis", regval);
	if (out_enabled)
		WRITE_ONCE(*out_enabled, regval & BIT(0));
	return ret;
}

static inline int cmdqv_write_config(struct tegra241_cmdqv *cmdqv, u32 regval)
{
	return tegra241_cmdqv_write_config(cmdqv,
					   REG_CMDQV(cmdqv, CONFIG),
					   REG_CMDQV(cmdqv, STATUS),
					   regval, "CMDQV: ", NULL);
}

static inline int vintf_write_config(struct tegra241_vintf *vintf, u32 regval)
{
	char header[16];

	snprintf(header, 16, "VINTF%u: ", vintf->idx);
	return tegra241_cmdqv_write_config(vintf->cmdqv,
					   REG_VINTF(vintf, CONFIG),
					   REG_VINTF(vintf, STATUS),
					   regval, header, &vintf->enabled);
}

static inline char *lvcmdq_error_header(struct tegra241_vcmdq *vcmdq,
					char *header, int hlen)
{
	WARN_ON(hlen < 64);
	if (WARN_ON(!vcmdq->vintf))
		return "";
	snprintf(header, hlen, "VINTF%u: VCMDQ%u/LVCMDQ%u: ",
		 vcmdq->vintf->idx, vcmdq->idx, vcmdq->lidx);
	return header;
}

static inline int vcmdq_write_config(struct tegra241_vcmdq *vcmdq, u32 regval)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);

	return tegra241_cmdqv_write_config(vcmdq->cmdqv,
					   REG_VCMDQ_PAGE0(vcmdq, CONFIG),
					   REG_VCMDQ_PAGE0(vcmdq, STATUS),
					   regval, h, &vcmdq->enabled);
}

/* ISR Functions */

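/*
 * For each LVCMDQ flagged in the VINTF error map, resynchronize the queue
 * via __arm_smmu_cmdq_skip_err() and then acknowledge the error by writing
 * the GERROR value back into GERRORN, as in the SMMUv3 error handshake.
 */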
static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf)
{
	int i;

	for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
		u64 map = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));

		while (map) {
			unsigned long lidx = __ffs64(map);
			struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
			u32 gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));

			__arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq);
			writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
			map &= ~BIT_ULL(lidx);
		}
	}
}

static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
{
	struct tegra241_cmdqv *cmdqv = (struct tegra241_cmdqv *)devid;
	void __iomem *reg_vintf_map = REG_CMDQV(cmdqv, VINTF_ERR_MAP);
	char err_str[256];
	u64 vintf_map;

	/* Use readl_relaxed() as register addresses are not 64-bit aligned */
	vintf_map = (u64)readl_relaxed(reg_vintf_map + 0x4) << 32 |
		    (u64)readl_relaxed(reg_vintf_map);

	snprintf(err_str, sizeof(err_str),
		 "vintf_map: %016llx, vcmdq_map %08x:%08x:%08x:%08x", vintf_map,
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(3))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(2))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(1))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(0))));

	dev_warn(cmdqv->dev, "unexpected error reported. %s\n", err_str);

	/* Handle VINTF0 and its LVCMDQs */
	if (vintf_map & BIT_ULL(0)) {
		tegra241_vintf0_handle_error(cmdqv->vintfs[0]);
		vintf_map &= ~BIT_ULL(0);
	}

	return IRQ_HANDLED;
}

/* Command Queue Function */

static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
{
	switch (ent->opcode) {
	case CMDQ_OP_TLBI_NH_ASID:
	case CMDQ_OP_TLBI_NH_VA:
	case CMDQ_OP_ATC_INV:
		return true;
	default:
		return false;
	}
}

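/*
 * Impl hook for the core driver to pick a secondary CMDQ. Returning NULL
 * means "no usable VCMDQ for this command", in which case the caller falls
 * back to the SMMU's own smmu->cmdq.
 */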
static struct arm_smmu_cmdq *
tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
			struct arm_smmu_cmdq_ent *ent)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf = cmdqv->vintfs[0];
	struct tegra241_vcmdq *vcmdq;
	u16 lidx;

	if (READ_ONCE(bypass_vcmdq))
		return NULL;

	/* Use SMMU CMDQ if VINTF0 is uninitialized */
	if (!READ_ONCE(vintf->enabled))
		return NULL;

	/*
	 * Select an LVCMDQ to use. As an interim policy, spread the issuing
	 * traffic across the LVCMDQs by CPU: each cmdq has its own lock, so
	 * if every CPU issued its command list through the same cmdq, only
	 * one CPU at a time could make progress while the others spun on
	 * that lock.
	 */
	lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
	vcmdq = vintf->lvcmdqs[lidx];
	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
		return NULL;

	/* Unsupported CMDs go through the smmu->cmdq pathway */
	if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
		return NULL;
	return &vcmdq->cmdq;
}

/* HW Reset Functions */

static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
	u32 gerrorn, gerror;

	if (vcmdq_write_config(vcmdq, 0)) {
		dev_err(vcmdq->cmdqv->dev,
			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
	}
	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, PROD));
	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, CONS));
	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, BASE));
	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, CONS_INDX_BASE));

	gerrorn = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN));
	gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
	if (gerror != gerrorn) {
		dev_warn(vcmdq->cmdqv->dev,
			 "%suncleared error detected, resetting\n", h);
		writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
	}

	dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h);
}

static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
	int ret;

	/* Reset VCMDQ */
	tegra241_vcmdq_hw_deinit(vcmdq);

	/* Configure and enable VCMDQ */
	writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));

	ret = vcmdq_write_config(vcmdq, VCMDQ_EN);
	if (ret) {
		dev_err(vcmdq->cmdqv->dev,
			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
		return ret;
	}

	dev_dbg(vcmdq->cmdqv->dev, "%sinited\n", h);
	return 0;
}

static void tegra241_vintf_hw_deinit(struct tegra241_vintf *vintf)
{
	u16 lidx;

	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx])
			tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
	vintf_write_config(vintf, 0);
}

static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
{
	u32 regval;
	u16 lidx;
	int ret;

	/* Reset VINTF */
	tegra241_vintf_hw_deinit(vintf);

	/* Configure and enable VINTF */
	/*
	 * Note that the HYP_OWN bit is wired to zero when running in a guest
	 * kernel, regardless of what is written here, as !HYP_OWN cmdq HW
	 * only supports a restricted set of commands.
	 */
	regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own);
	writel(regval, REG_VINTF(vintf, CONFIG));

	ret = vintf_write_config(vintf, regval | VINTF_EN);
	if (ret)
		return ret;
	/*
	 * As noted above, HYP_OWN is wired to zero for a guest kernel, so read
	 * it back from the HW to make sure hyp_own reflects the real setting.
	 */
	vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));

	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
			ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]);
			if (ret) {
				tegra241_vintf_hw_deinit(vintf);
				return ret;
			}
		}
	}

	return 0;
}

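/*
 * Global VCMDQs are handed out to VINTFs linearly: global queue
 * (idx * num_lvcmdqs_per_vintf + lidx) becomes logical queue lidx of
 * VINTF idx, matching the index math in tegra241_vintf_init_lvcmdq().
 */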
static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	u16 qidx, lidx, idx;
	u32 regval;
	int ret;

	/* Reset CMDQV */
	regval = readl_relaxed(REG_CMDQV(cmdqv, CONFIG));
	ret = cmdqv_write_config(cmdqv, regval & ~CMDQV_EN);
	if (ret)
		return ret;
	ret = cmdqv_write_config(cmdqv, regval | CMDQV_EN);
	if (ret)
		return ret;

	/* Assign preallocated global VCMDQs to each VINTF as LVCMDQs */
	for (idx = 0, qidx = 0; idx < cmdqv->num_vintfs; idx++) {
		for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
			regval  = FIELD_PREP(CMDQV_CMDQ_ALLOC_VINTF, idx);
			regval |= FIELD_PREP(CMDQV_CMDQ_ALLOC_LVCMDQ, lidx);
			regval |= CMDQV_CMDQ_ALLOCATED;
			writel_relaxed(regval,
				       REG_CMDQV(cmdqv, CMDQ_ALLOC(qidx++)));
		}
	}

	return tegra241_vintf_hw_init(cmdqv->vintfs[0], true);
}

/* VCMDQ Resource Helpers */

static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
	struct arm_smmu_queue *q = &vcmdq->cmdq.q;
	size_t nents = 1 << q->llq.max_n_shift;
	size_t qsz = nents << CMDQ_ENT_SZ_SHIFT;

	if (!q->base)
		return;
	dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma);
}

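/*
 * Per the VCMDQ_ADDR/VCMDQ_LOG2SIZE fields above, the PAGE1 BASE register
 * takes the queue PA in bits [47:5] and log2 of the queue size in bits
 * [4:0], so q_base is rebuilt in that layout below after the common init
 * helper has allocated the queue memory.
 */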
static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
	struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
	struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
	struct arm_smmu_queue *q = &cmdq->q;
	char name[16];
	u32 regval;
	int ret;

	snprintf(name, 16, "vcmdq%u", vcmdq->idx);

	/* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */
	regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	q->llq.max_n_shift =
		min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, regval));

	/* Use the common helper to init the VCMDQ, and then... */
	ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
				      TEGRA241_VCMDQ_PROD, TEGRA241_VCMDQ_CONS,
				      CMDQ_ENT_DWORDS, name);
	if (ret)
		return ret;

	/* ...override q_base to write VCMDQ_BASE registers */
	q->q_base = q->base_dma & VCMDQ_ADDR;
	q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);

	if (!vcmdq->vintf->hyp_own)
		cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;

	return arm_smmu_cmdq_init(smmu, cmdq);
}

/* VINTF Logical VCMDQ Resource Helpers */

static void tegra241_vintf_deinit_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	vintf->lvcmdqs[lidx] = NULL;
}

static int tegra241_vintf_init_lvcmdq(struct tegra241_vintf *vintf, u16 lidx,
				      struct tegra241_vcmdq *vcmdq)
{
	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
	u16 idx = vintf->idx;

	vcmdq->idx = idx * cmdqv->num_lvcmdqs_per_vintf + lidx;
	vcmdq->lidx = lidx;
	vcmdq->cmdqv = cmdqv;
	vcmdq->vintf = vintf;
	vcmdq->page0 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE0(idx, lidx);
	vcmdq->page1 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE1(idx, lidx);

	vintf->lvcmdqs[lidx] = vcmdq;
	return 0;
}

static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
	char header[64];

	tegra241_vcmdq_free_smmu_cmdq(vcmdq);
	tegra241_vintf_deinit_lvcmdq(vintf, lidx);

	dev_dbg(vintf->cmdqv->dev,
		"%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64));
	kfree(vcmdq);
}

static struct tegra241_vcmdq *
tegra241_vintf_alloc_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
	struct tegra241_vcmdq *vcmdq;
	char header[64];
	int ret;

	vcmdq = kzalloc(sizeof(*vcmdq), GFP_KERNEL);
	if (!vcmdq)
		return ERR_PTR(-ENOMEM);

	ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
	if (ret)
		goto free_vcmdq;

	/* Build an arm_smmu_cmdq for each LVCMDQ */
	ret = tegra241_vcmdq_alloc_smmu_cmdq(vcmdq);
	if (ret)
		goto deinit_lvcmdq;

	dev_dbg(cmdqv->dev,
		"%sallocated\n", lvcmdq_error_header(vcmdq, header, 64));
	return vcmdq;

deinit_lvcmdq:
	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
free_vcmdq:
	kfree(vcmdq);
	return ERR_PTR(ret);
}

/* VINTF Resource Helpers */

static void tegra241_cmdqv_deinit_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
{
	kfree(cmdqv->vintfs[idx]->lvcmdqs);
	ida_free(&cmdqv->vintf_ids, idx);
	cmdqv->vintfs[idx] = NULL;
}

static int tegra241_cmdqv_init_vintf(struct tegra241_cmdqv *cmdqv, u16 max_idx,
				     struct tegra241_vintf *vintf)
{
	u16 idx;
	int ret;

	ret = ida_alloc_max(&cmdqv->vintf_ids, max_idx, GFP_KERNEL);
	if (ret < 0)
		return ret;
	idx = ret;

	vintf->idx = idx;
	vintf->cmdqv = cmdqv;
	vintf->base = cmdqv->base + TEGRA241_VINTF(idx);

	vintf->lvcmdqs = kcalloc(cmdqv->num_lvcmdqs_per_vintf,
				 sizeof(*vintf->lvcmdqs), GFP_KERNEL);
	if (!vintf->lvcmdqs) {
		ida_free(&cmdqv->vintf_ids, idx);
		return -ENOMEM;
	}

	cmdqv->vintfs[idx] = vintf;
	return ret;
}

/* Remove Helpers */

static void tegra241_vintf_remove_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
	tegra241_vintf_free_lvcmdq(vintf, lidx);
}

static void tegra241_cmdqv_remove_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
{
	struct tegra241_vintf *vintf = cmdqv->vintfs[idx];
	u16 lidx;

	/* Remove LVCMDQ resources */
	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
		if (vintf->lvcmdqs[lidx])
			tegra241_vintf_remove_lvcmdq(vintf, lidx);

	/* Remove VINTF resources */
	tegra241_vintf_hw_deinit(vintf);

	dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx);
	tegra241_cmdqv_deinit_vintf(cmdqv, idx);
	kfree(vintf);
}

static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	u16 idx;

	/* Remove VINTF resources */
	for (idx = 0; idx < cmdqv->num_vintfs; idx++) {
		if (cmdqv->vintfs[idx]) {
			/* Only vintf0 should remain at this stage */
			WARN_ON(idx > 0);
			tegra241_cmdqv_remove_vintf(cmdqv, idx);
		}
	}

	/* Remove cmdqv resources */
	ida_destroy(&cmdqv->vintf_ids);

	if (cmdqv->irq > 0)
		free_irq(cmdqv->irq, cmdqv);
	iounmap(cmdqv->base);
	kfree(cmdqv->vintfs);
	put_device(cmdqv->dev); /* smmu->impl_dev */
}

static struct arm_smmu_impl_ops tegra241_cmdqv_impl_ops = {
	.get_secondary_cmdq = tegra241_cmdqv_get_cmdq,
	.device_reset = tegra241_cmdqv_hw_reset,
	.device_remove = tegra241_cmdqv_remove,
};

/* Probe Functions */

static int tegra241_cmdqv_acpi_is_memory(struct acpi_resource *res, void *data)
{
	struct resource_win win;

	return !acpi_dev_resource_address_space(res, &win);
}

static int tegra241_cmdqv_acpi_get_irqs(struct acpi_resource *ares, void *data)
{
	struct resource r;
	int *irq = data;

	if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r))
		*irq = r.start;
	return 1; /* No need to add resource to the list */
}

static struct resource *
tegra241_cmdqv_find_acpi_resource(struct device *dev, int *irq)
{
	struct acpi_device *adev = to_acpi_device(dev);
	struct list_head resource_list;
	struct resource_entry *rentry;
	struct resource *res = NULL;
	int ret;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     tegra241_cmdqv_acpi_is_memory, NULL);
	if (ret < 0) {
		dev_err(dev, "failed to get memory resource: %d\n", ret);
		return NULL;
	}

	rentry = list_first_entry_or_null(&resource_list,
					  struct resource_entry, node);
	if (!rentry) {
		dev_err(dev, "failed to get memory resource entry\n");
		goto free_list;
	}

	/* Caller must free the res */
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		goto free_list;

	*res = *rentry->res;

	acpi_dev_free_resource_list(&resource_list);

	INIT_LIST_HEAD(&resource_list);

	if (irq)
		ret = acpi_dev_get_resources(adev, &resource_list,
					     tegra241_cmdqv_acpi_get_irqs, irq);
	if (ret < 0 || !irq || *irq <= 0)
		dev_warn(dev, "no interrupt. errors will not be reported\n");

free_list:
	acpi_dev_free_resource_list(&resource_list);
	return res;
}

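/*
 * Structure setup deliberately never fails the SMMU probe: any error below
 * releases the CMDQV resources and returns 0, so the device keeps working
 * on the standard SMMU CMDQ instead.
 */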
static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf;
	int lidx;
	int ret;

	vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
	if (!vintf)
		goto out_fallback;

	/* Init VINTF0 for in-kernel use */
	ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
	if (ret) {
		dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
		goto free_vintf;
	}

	/* Preallocate logical VCMDQs to VINTF0 */
	for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
		struct tegra241_vcmdq *vcmdq;

		vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
		if (IS_ERR(vcmdq))
			goto free_lvcmdq;
	}

	/* Now, we are ready to run all the impl ops */
	smmu->impl_ops = &tegra241_cmdqv_impl_ops;
	return 0;

free_lvcmdq:
	for (lidx--; lidx >= 0; lidx--)
		tegra241_vintf_free_lvcmdq(vintf, lidx);
	tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
free_vintf:
	kfree(vintf);
out_fallback:
	dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
	smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
	tegra241_cmdqv_remove(smmu);
	return 0;
}

#ifdef CONFIG_IOMMU_DEBUGFS
static struct dentry *cmdqv_debugfs_dir;
#endif

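/*
 * struct tegra241_cmdqv embeds struct arm_smmu_device as its first member
 * (enforced by the static_assert below), so the already-probed smmu can be
 * devm_krealloc()'ed into the larger cmdqv allocation and the embedded copy
 * handed back to the core driver.
 */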
static struct arm_smmu_device *
__tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
		       int irq)
{
	static const struct arm_smmu_impl_ops init_ops = {
		.init_structures = tegra241_cmdqv_init_structures,
		.device_remove = tegra241_cmdqv_remove,
	};
	struct tegra241_cmdqv *cmdqv = NULL;
	struct arm_smmu_device *new_smmu;
	void __iomem *base;
	u32 regval;
	int ret;

	static_assert(offsetof(struct tegra241_cmdqv, smmu) == 0);

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(smmu->dev, "failed to ioremap\n");
		return NULL;
	}

	regval = readl(base + TEGRA241_CMDQV_CONFIG);
	if (disable_cmdqv) {
		dev_info(smmu->dev, "Detected disable_cmdqv=true\n");
		writel(regval & ~CMDQV_EN, base + TEGRA241_CMDQV_CONFIG);
		goto iounmap;
	}

	cmdqv = devm_krealloc(smmu->dev, smmu, sizeof(*cmdqv), GFP_KERNEL);
	if (!cmdqv)
		goto iounmap;
	new_smmu = &cmdqv->smmu;

	cmdqv->irq = irq;
	cmdqv->base = base;
	cmdqv->dev = smmu->impl_dev;

	if (cmdqv->irq > 0) {
		ret = request_irq(irq, tegra241_cmdqv_isr, 0, "tegra241-cmdqv",
				  cmdqv);
		if (ret) {
			dev_err(cmdqv->dev, "failed to request irq (%d): %d\n",
				cmdqv->irq, ret);
			goto iounmap;
		}
	}

	regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
	cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval);
	cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval);
	cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs;

	cmdqv->vintfs =
		kcalloc(cmdqv->num_vintfs, sizeof(*cmdqv->vintfs), GFP_KERNEL);
	if (!cmdqv->vintfs)
		goto free_irq;

	ida_init(&cmdqv->vintf_ids);

#ifdef CONFIG_IOMMU_DEBUGFS
	if (!cmdqv_debugfs_dir) {
		cmdqv_debugfs_dir =
			debugfs_create_dir("tegra241_cmdqv", iommu_debugfs_dir);
		debugfs_create_bool("bypass_vcmdq", 0644, cmdqv_debugfs_dir,
				    &bypass_vcmdq);
	}
#endif

	/* Provide init-level ops only, until tegra241_cmdqv_init_structures */
	new_smmu->impl_ops = &init_ops;

	return new_smmu;

free_irq:
	if (cmdqv->irq > 0)
		free_irq(cmdqv->irq, cmdqv);
iounmap:
	iounmap(base);
	return NULL;
}

struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
{
	struct arm_smmu_device *new_smmu;
	struct resource *res = NULL;
	int irq = 0; /* start from "no IRQ" so the ACPI scan result is well-defined */

	if (!smmu->dev->of_node)
		res = tegra241_cmdqv_find_acpi_resource(smmu->impl_dev, &irq);
	if (!res)
		goto out_fallback;

	new_smmu = __tegra241_cmdqv_probe(smmu, res, irq);
	kfree(res);

	if (new_smmu)
		return new_smmu;

out_fallback:
	dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
	smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
	put_device(smmu->impl_dev);
	return ERR_PTR(-ENODEV);
}