// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

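/*
 * struct vic_config - SoC-specific parameters for the VIC engine
 * @firmware: name of the firmware blob to load into the Falcon
 * @version: engine version, propagated to the Tegra DRM client
 * @supports_sid: whether stream IDs can be programmed into the TFBIF, a
 *                prerequisite for memory context isolation
 */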
struct vic_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

struct vic {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;
	struct reset_control *rst;

	bool can_use_context;

	/* Platform configuration */
	const struct vic_config *config;
};

static inline struct vic *to_vic(struct tegra_drm_client *client)
{
	return container_of(client, struct vic, client);
}

static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
	writel(value, vic->regs + offset);
}

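/*
 * Bring the engine up after power-on: program the TFBIF stream IDs (when
 * the SoC supports them), set up clock gating, boot the Falcon and, for old
 * firmware that cannot set up FCE itself, pass the FCE microcode size and
 * offset via the method interface.
 */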
static int vic_boot(struct vic *vic)
{
	u32 fce_ucode_size, fce_bin_data_offset, stream_id;
	void *hdr;
	int err = 0;

	if (vic->config->supports_sid && tegra_dev_iommu_get_stream_id(vic->dev, &stream_id)) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
			TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		vic_writel(vic, value, VIC_TFBIF_TRANSCFG);

		/*
		 * STREAMID0 is used for input/output buffers. Initialize it to SID_VIC in case
		 * context isolation is not enabled, and SID_VIC is used for both firmware and
		 * data buffers.
		 *
		 * If context isolation is enabled, it will be overridden by the SETSTREAMID
		 * opcode as part of each job.
		 */
		vic_writel(vic, stream_id, VIC_THI_STREAMID0);

		/* STREAMID1 is used for firmware loading. */
		vic_writel(vic, stream_id, VIC_THI_STREAMID1);
	}

	/* setup clockgating registers */
	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
			CG_IDLE_CG_EN |
			CG_WAKEUP_DLY_CNT(4),
		   NV_PVIC_MISC_PRI_VIC_CG);

	err = falcon_boot(&vic->falcon);
	if (err < 0)
		return err;

	hdr = vic->falcon.firmware.virt;
	fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);

	/* Old VIC firmware needs kernel help with setting up FCE microcode. */
	if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		hdr = vic->falcon.firmware.virt +
			*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
		fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);

		falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
				      fce_ucode_size);
		falcon_execute_method(
			&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
			(vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
	}

	err = falcon_wait_idle(&vic->falcon);
	if (err < 0) {
		dev_err(vic->dev,
			"failed to set application ID and FCE base\n");
		return err;
	}

	return 0;
}

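/*
 * host1x client initialization: attach to the IOMMU domain (if any), request
 * a channel and a syncpoint, and register the engine with the Tegra DRM core.
 */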
static int vic_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(vic->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	vic->channel = host1x_channel_request(client);
	if (!vic->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto free_syncpt;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

free_syncpt:
	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(vic->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

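/*
 * host1x client teardown: unregister from the Tegra DRM core, force the
 * engine into suspend, release the syncpoint and channel, detach from the
 * IOMMU domain and free the firmware copy set up by vic_load_firmware().
 */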
static int vic_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(vic->channel);
	host1x_client_iommu_detach(client);

	vic->channel = NULL;

	if (client->group) {
		dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
				 vic->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, vic->falcon.firmware.size,
			       vic->falcon.firmware.virt,
			       vic->falcon.firmware.iova);
	} else {
		dma_free_coherent(vic->dev, vic->falcon.firmware.size,
				  vic->falcon.firmware.virt,
				  vic->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops vic_client_ops = {
	.init = vic_init,
	.exit = vic_exit,
};

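/*
 * Read the firmware blob and copy it into a buffer that the Falcon can fetch
 * from: a coherent DMA allocation if there is no shared IOMMU domain, or an
 * allocation in the shared domain otherwise. The function is serialized by a
 * local mutex and becomes a no-op once the firmware has been loaded. It also
 * determines whether the firmware is new enough to support memory context
 * isolation (see can_use_context).
 */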
static int vic_load_firmware(struct vic *vic)
{
	struct host1x_client *client = &vic->client.base;
	struct tegra_drm *tegra = vic->client.drm;
	static DEFINE_MUTEX(lock);
	u32 fce_bin_data_offset;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	mutex_lock(&lock);

	if (vic->falcon.firmware.virt) {
		err = 0;
		goto unlock;
	}

	err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
	if (err < 0)
		goto unlock;

	size = vic->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
		if (!virt) {
			err = -ENOMEM;
			goto unlock;
		}
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
		if (IS_ERR(virt)) {
			err = PTR_ERR(virt);
			goto unlock;
		}
	}

	vic->falcon.firmware.virt = virt;
	vic->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&vic->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(vic->dev, phys);
		if (err < 0)
			goto cleanup;

		vic->falcon.firmware.phys = phys;
	}

	/*
	 * Check if firmware is new enough to not require mapping firmware
	 * to data buffer domains.
	 */
	fce_bin_data_offset = *(u32 *)(virt + VIC_UCODE_FCE_DATA_OFFSET);

	if (!vic->config->supports_sid) {
		vic->can_use_context = false;
	} else if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		/*
		 * Firmware will access FCE through STREAMID0, so context
		 * isolation cannot be used.
		 */
		vic->can_use_context = false;
		dev_warn_once(vic->dev, "context isolation disabled due to old firmware\n");
	} else {
		vic->can_use_context = true;
	}

unlock:
	mutex_unlock(&lock);
	return err;

cleanup:
	if (!client->group)
		dma_free_coherent(vic->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	mutex_unlock(&lock);
	return err;
}

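/*
 * Runtime PM resume: enable the clock, take the engine out of reset, make
 * sure the firmware is loaded and boot the engine.
 */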
static int __maybe_unused vic_runtime_resume(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(vic->clk);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	err = reset_control_deassert(vic->rst);
	if (err < 0)
		goto disable;

	usleep_range(10, 20);

	err = vic_load_firmware(vic);
	if (err < 0)
		goto assert;

	err = vic_boot(vic);
	if (err < 0)
		goto assert;

	return 0;

assert:
	reset_control_assert(vic->rst);
disable:
	clk_disable_unprepare(vic->clk);
	return err;
}

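/*
 * Runtime PM suspend: stop the channel, put the engine back into reset and
 * gate its clock.
 */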
static int __maybe_unused vic_runtime_suspend(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop(vic->channel);

	err = reset_control_assert(vic->rst);
	if (err < 0)
		return err;

	usleep_range(2000, 4000);

	clk_disable_unprepare(vic->clk);

	return 0;
}

static int vic_open_channel(struct tegra_drm_client *client,
			    struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(client);

	context->channel = host1x_channel_get(vic->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void vic_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

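/*
 * Report whether memory context isolation can be used with the currently
 * loaded firmware.
 */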
static int vic_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	struct vic *vic = to_vic(client);
	int err;

	/* This doesn't access HW so it's safe to call without powering up. */
	err = vic_load_firmware(vic);
	if (err < 0)
		return err;

	*supported = vic->can_use_context;

	return 0;
}

static const struct tegra_drm_client_ops vic_ops = {
	.open_channel = vic_open_channel,
	.close_channel = vic_close_channel,
	.submit = tegra_drm_submit,
	.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
	.can_use_memory_ctx = vic_can_use_memory_ctx,
};

#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"

static const struct vic_config vic_t124_config = {
	.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
	.version = 0x40,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"

static const struct vic_config vic_t210_config = {
	.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"

static const struct vic_config vic_t186_config = {
	.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"

static const struct vic_config vic_t194_config = {
	.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_234_VIC_FIRMWARE "nvidia/tegra234/vic.bin"

static const struct vic_config vic_t234_config = {
	.firmware = NVIDIA_TEGRA_234_VIC_FIRMWARE,
	.version = 0x23,
	.supports_sid = true,
};

static const struct of_device_id tegra_vic_of_match[] = {
	{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
	{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
	{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
	{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
	{ .compatible = "nvidia,tegra234-vic", .data = &vic_t234_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vic_of_match);

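/*
 * Inherit the DMA mask from the host1x parent, map registers, acquire the
 * clock and (when not in a power domain) the reset control, initialize the
 * Falcon helper and register the host1x client before enabling runtime PM.
 */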
static int vic_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct vic *vic;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
	if (!vic)
		return -ENOMEM;

	vic->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	vic->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vic->regs))
		return PTR_ERR(vic->regs);

	vic->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vic->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(vic->clk);
	}

	err = clk_set_rate(vic->clk, ULONG_MAX);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock rate\n");
		return err;
	}

	if (!dev->pm_domain) {
		vic->rst = devm_reset_control_get(dev, "vic");
		if (IS_ERR(vic->rst)) {
			dev_err(&pdev->dev, "failed to get reset\n");
			return PTR_ERR(vic->rst);
		}
	}

	vic->falcon.dev = dev;
	vic->falcon.regs = vic->regs;

	err = falcon_init(&vic->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, vic);

	INIT_LIST_HEAD(&vic->client.base.list);
	vic->client.base.ops = &vic_client_ops;
	vic->client.base.dev = dev;
	vic->client.base.class = HOST1X_CLASS_VIC;
	vic->client.base.syncpts = syncpts;
	vic->client.base.num_syncpts = 1;
	vic->dev = dev;

	INIT_LIST_HEAD(&vic->client.list);
	vic->client.version = vic->config->version;
	vic->client.ops = &vic_ops;

	err = host1x_client_register(&vic->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	pm_runtime_enable(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 500);

	return 0;

exit_falcon:
	falcon_exit(&vic->falcon);

	return err;
}

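/*
 * Undo vic_probe(): disable runtime PM, unregister the host1x client and
 * tear down the Falcon helper.
 */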
static void vic_remove(struct platform_device *pdev)
{
	struct vic *vic = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	host1x_client_unregister(&vic->client.base);
	falcon_exit(&vic->falcon);
}

static const struct dev_pm_ops vic_pm_ops = {
	RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

struct platform_driver tegra_vic_driver = {
	.driver = {
		.name = "tegra-vic",
		.of_match_table = tegra_vic_of_match,
		.pm = &vic_pm_ops
	},
	.probe = vic_probe,
	.remove_new = vic_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_234_VIC_FIRMWARE);
#endif