1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #include <linux/clk.h>
4 #include <linux/delay.h>
5 #include <linux/dma-mapping.h>
6 #include <linux/host1x.h>
7 #include <linux/iommu.h>
8 #include <linux/module.h>
9 #include <linux/of.h>
10 #include <linux/platform_device.h>
11 #include <linux/pm_runtime.h>
12
13 #include "drm.h"
14 #include "falcon.h"
15
/*
 * struct nvjpg_config - SoC-specific parameters for the NVJPG engine
 * @firmware: path of the Falcon firmware image loaded for this SoC
 * @version: engine version reported to userspace via the Tegra DRM client
 */
struct nvjpg_config {
	const char *firmware;
	unsigned int version;
};
20
/*
 * struct nvjpg - per-device driver state for one NVJPG instance
 * @falcon: Falcon microcontroller context (firmware load/boot state)
 * @regs: MMIO registers mapped from the platform device
 * @client: Tegra DRM client embedded in the host1x client framework
 * @dev: backing struct device, used for DMA allocations and logging
 * @clk: engine clock, managed from the runtime PM callbacks
 * @config: SoC-specific configuration matched from the device tree
 */
struct nvjpg {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct device *dev;
	struct clk *clk;

	/* Platform configuration */
	const struct nvjpg_config *config;
};
32
/* Convert the embedded Tegra DRM client back to its containing nvjpg. */
static inline struct nvjpg *to_nvjpg(struct tegra_drm_client *client)
{
	return container_of(client, struct nvjpg, client);
}
37
/*
 * nvjpg_init() - host1x client initialization callback
 * @client: host1x client embedded in the nvjpg device
 *
 * Attaches the device to the IOMMU domain (if one exists), registers the
 * engine with the Tegra DRM core and inherits the DMA parameters from the
 * parent host1x device. Returns 0 on success or a negative error code.
 */
static int nvjpg_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvjpg *nvjpg = to_nvjpg(drm);
	int err;

	/* -ENODEV means no IOMMU is available, which is not fatal */
	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(nvjpg->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto detach;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

detach:
	host1x_client_iommu_detach(client);

	return err;
}
69
/*
 * nvjpg_exit() - host1x client teardown callback
 * @client: host1x client embedded in the nvjpg device
 *
 * Unregisters the engine from the Tegra DRM core, disables runtime PM,
 * detaches from the IOMMU and releases the firmware memory. The firmware
 * is freed with the counterpart of however it was allocated in
 * nvjpg_load_falcon_firmware(): via the shared IOMMU domain (group set)
 * or via the DMA API. Returns 0 on success or a negative error code.
 */
static int nvjpg_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvjpg *nvjpg = to_nvjpg(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_client_iommu_detach(client);

	if (client->group) {
		/* firmware was additionally mapped for cache maintenance */
		dma_unmap_single(nvjpg->dev, nvjpg->falcon.firmware.phys,
				 nvjpg->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, nvjpg->falcon.firmware.size,
			       nvjpg->falcon.firmware.virt,
			       nvjpg->falcon.firmware.iova);
	} else {
		dma_free_coherent(nvjpg->dev, nvjpg->falcon.firmware.size,
				  nvjpg->falcon.firmware.virt,
				  nvjpg->falcon.firmware.iova);
	}

	return 0;
}
104
/* host1x client callbacks invoked when the host1x bus (un)binds us */
static const struct host1x_client_ops nvjpg_client_ops = {
	.init = nvjpg_init,
	.exit = nvjpg_exit,
};
109
nvjpg_load_falcon_firmware(struct nvjpg * nvjpg)110 static int nvjpg_load_falcon_firmware(struct nvjpg *nvjpg)
111 {
112 struct host1x_client *client = &nvjpg->client.base;
113 struct tegra_drm *tegra = nvjpg->client.drm;
114 dma_addr_t iova;
115 size_t size;
116 void *virt;
117 int err;
118
119 if (nvjpg->falcon.firmware.virt)
120 return 0;
121
122 err = falcon_read_firmware(&nvjpg->falcon, nvjpg->config->firmware);
123 if (err < 0)
124 return err;
125
126 size = nvjpg->falcon.firmware.size;
127
128 if (!client->group) {
129 virt = dma_alloc_coherent(nvjpg->dev, size, &iova, GFP_KERNEL);
130 if (!virt)
131 return -ENOMEM;
132 } else {
133 virt = tegra_drm_alloc(tegra, size, &iova);
134 if (IS_ERR(virt))
135 return PTR_ERR(virt);
136 }
137
138 nvjpg->falcon.firmware.virt = virt;
139 nvjpg->falcon.firmware.iova = iova;
140
141 err = falcon_load_firmware(&nvjpg->falcon);
142 if (err < 0)
143 goto cleanup;
144
145 /*
146 * In this case we have received an IOVA from the shared domain, so we
147 * need to make sure to get the physical address so that the DMA API
148 * knows what memory pages to flush the cache for.
149 */
150 if (client->group) {
151 dma_addr_t phys;
152
153 phys = dma_map_single(nvjpg->dev, virt, size, DMA_TO_DEVICE);
154
155 err = dma_mapping_error(nvjpg->dev, phys);
156 if (err < 0)
157 goto cleanup;
158
159 nvjpg->falcon.firmware.phys = phys;
160 }
161
162 return 0;
163
164 cleanup:
165 if (!client->group)
166 dma_free_coherent(nvjpg->dev, size, virt, iova);
167 else
168 tegra_drm_free(tegra, size, virt, iova);
169
170 return err;
171 }
172
nvjpg_runtime_resume(struct device * dev)173 static __maybe_unused int nvjpg_runtime_resume(struct device *dev)
174 {
175 struct nvjpg *nvjpg = dev_get_drvdata(dev);
176 int err;
177
178 err = clk_prepare_enable(nvjpg->clk);
179 if (err < 0)
180 return err;
181
182 usleep_range(20, 30);
183
184 err = nvjpg_load_falcon_firmware(nvjpg);
185 if (err < 0)
186 goto disable_clk;
187
188 err = falcon_boot(&nvjpg->falcon);
189 if (err < 0)
190 goto disable_clk;
191
192 return 0;
193
194 disable_clk:
195 clk_disable_unprepare(nvjpg->clk);
196 return err;
197 }
198
/*
 * nvjpg_runtime_suspend() - power the engine down
 * @dev: nvjpg device
 *
 * Simply gates the engine clock; firmware state is kept resident so the
 * next resume only needs to re-boot the Falcon. Always returns 0.
 */
static __maybe_unused int nvjpg_runtime_suspend(struct device *dev)
{
	struct nvjpg *nvjpg = dev_get_drvdata(dev);

	clk_disable_unprepare(nvjpg->clk);

	return 0;
}
207
/* This engine does not support host1x memory contexts. */
static int nvjpg_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	*supported = false;

	return 0;
}
214
/* Tegra DRM client callbacks; no stream ID offset register is exposed */
static const struct tegra_drm_client_ops nvjpg_ops = {
	.get_streamid_offset = NULL,
	.can_use_memory_ctx = nvjpg_can_use_memory_ctx,
};
219
#define NVIDIA_TEGRA_210_NVJPG_FIRMWARE "nvidia/tegra210/nvjpg.bin"

/* Tegra210 instantiation of the NVJPG engine */
static const struct nvjpg_config tegra210_nvjpg_config = {
	.firmware = NVIDIA_TEGRA_210_NVJPG_FIRMWARE,
	.version = 0x21,
};
226
/* Device tree match table; .data selects the SoC configuration above */
static const struct of_device_id tegra_nvjpg_of_match[] = {
	{ .compatible = "nvidia,tegra210-nvjpg", .data = &tegra210_nvjpg_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_nvjpg_of_match);
232
nvjpg_probe(struct platform_device * pdev)233 static int nvjpg_probe(struct platform_device *pdev)
234 {
235 struct device *dev = &pdev->dev;
236 struct nvjpg *nvjpg;
237 int err;
238
239 /* inherit DMA mask from host1x parent */
240 err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
241 if (err < 0) {
242 dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
243 return err;
244 }
245
246 nvjpg = devm_kzalloc(dev, sizeof(*nvjpg), GFP_KERNEL);
247 if (!nvjpg)
248 return -ENOMEM;
249
250 nvjpg->config = of_device_get_match_data(dev);
251
252 nvjpg->regs = devm_platform_ioremap_resource(pdev, 0);
253 if (IS_ERR(nvjpg->regs))
254 return PTR_ERR(nvjpg->regs);
255
256 nvjpg->clk = devm_clk_get(dev, "nvjpg");
257 if (IS_ERR(nvjpg->clk)) {
258 dev_err(&pdev->dev, "failed to get clock\n");
259 return PTR_ERR(nvjpg->clk);
260 }
261
262 err = clk_set_rate(nvjpg->clk, ULONG_MAX);
263 if (err < 0) {
264 dev_err(&pdev->dev, "failed to set clock rate\n");
265 return err;
266 }
267
268 nvjpg->falcon.dev = dev;
269 nvjpg->falcon.regs = nvjpg->regs;
270
271 err = falcon_init(&nvjpg->falcon);
272 if (err < 0)
273 return err;
274
275 platform_set_drvdata(pdev, nvjpg);
276
277 INIT_LIST_HEAD(&nvjpg->client.base.list);
278 nvjpg->client.base.ops = &nvjpg_client_ops;
279 nvjpg->client.base.dev = dev;
280 nvjpg->client.base.class = HOST1X_CLASS_NVJPG;
281 nvjpg->dev = dev;
282
283 INIT_LIST_HEAD(&nvjpg->client.list);
284 nvjpg->client.version = nvjpg->config->version;
285 nvjpg->client.ops = &nvjpg_ops;
286
287 err = host1x_client_register(&nvjpg->client.base);
288 if (err < 0) {
289 dev_err(dev, "failed to register host1x client: %d\n", err);
290 goto exit_falcon;
291 }
292
293 pm_runtime_use_autosuspend(dev);
294 pm_runtime_set_autosuspend_delay(dev, 500);
295 devm_pm_runtime_enable(dev);
296
297 return 0;
298
299 exit_falcon:
300 falcon_exit(&nvjpg->falcon);
301
302 return err;
303 }
304
/*
 * nvjpg_remove() - unbind the platform device
 * @pdev: NVJPG platform device
 *
 * Unregisters the host1x client (which triggers nvjpg_exit()) and tears
 * down the Falcon context; everything else is devres-managed.
 */
static void nvjpg_remove(struct platform_device *pdev)
{
	struct nvjpg *nvjpg = platform_get_drvdata(pdev);

	host1x_client_unregister(&nvjpg->client.base);
	falcon_exit(&nvjpg->falcon);
}
312
/* System sleep is funneled through the runtime PM callbacks */
static const struct dev_pm_ops nvjpg_pm_ops = {
	RUNTIME_PM_OPS(nvjpg_runtime_suspend, nvjpg_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
317
/* Registered by the Tegra DRM core alongside the other engine drivers */
struct platform_driver tegra_nvjpg_driver = {
	.driver = {
		.name = "tegra-nvjpg",
		.of_match_table = tegra_nvjpg_of_match,
		.pm = &nvjpg_pm_ops
	},
	.probe = nvjpg_probe,
	.remove = nvjpg_remove,
};
327
/* Advertise the firmware dependency for initramfs/module tooling */
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVJPG_FIRMWARE);
#endif
331