xref: /linux/drivers/media/platform/qcom/venus/firmware.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Linaro Ltd.
 */

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/sizes.h>
#include <linux/soc/qcom/mdt_loader.h>

#include "core.h"
#include "firmware.h"
#include "hfi_venus_io.h"

#define VENUS_PAS_ID			9
#define VENUS_FW_MEM_SIZE		(6 * SZ_1M)
#define VENUS_FW_START_ADDR		0x0

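/*
 * Program the firmware, CPA and non-pixel address ranges into the CPU
 * wrapper and bring the video core CPU (XTSS on IRIS2_1, ARM9 otherwise)
 * out of reset. Used only on the non-TZ path, where the driver owns these
 * registers instead of the secure firmware.
 */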
static void venus_reset_cpu(struct venus_core *core)
{
	u32 fw_size = core->fw.mapped_mem_size;
	void __iomem *wrapper_base;

	if (IS_IRIS2_1(core))
		wrapper_base = core->wrapper_tz_base;
	else
		wrapper_base = core->wrapper_base;

	writel(0, wrapper_base + WRAPPER_FW_START_ADDR);
	writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR);
	writel(0, wrapper_base + WRAPPER_CPA_START_ADDR);
	writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR);
	writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
	writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);

	if (IS_IRIS2_1(core)) {
		/* Bring XTSS out of reset */
		writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET);
	} else {
		writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS);
		writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG);

		/* Bring ARM9 out of reset */
		writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET);
	}
}

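/*
 * Suspend or resume the video core. With TZ the state change goes through
 * an SCM call and -EINVAL on resume is treated as success; without TZ the
 * driver re-runs the CPU reset sequence on resume or asserts the reset bit
 * on suspend.
 */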
int venus_set_hw_state(struct venus_core *core, bool resume)
{
	int ret;

	if (core->use_tz) {
		ret = qcom_scm_set_remote_state(resume, 0);
		if (resume && ret == -EINVAL)
			ret = 0;
		return ret;
	}

	if (resume) {
		venus_reset_cpu(core);
	} else {
		if (IS_IRIS2_1(core))
			writel(WRAPPER_XTSS_SW_RESET_BIT,
			       core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
		else
			writel(WRAPPER_A9SS_SW_RESET_BIT,
			       core->wrapper_base + WRAPPER_A9SS_SW_RESET);
	}

	return 0;
}

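/*
 * Look up the reserved memory-region from DT, validate that it can hold the
 * MDT image, and load the firmware into it: via the PAS-aware loader when TZ
 * is used, otherwise via the plain loader. The region's physical base and
 * size are returned through @mem_phys and @mem_size.
 */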
static int venus_load_fw(struct venus_core *core, const char *fwname,
			 phys_addr_t *mem_phys, size_t *mem_size)
{
	const struct firmware *mdt;
	struct reserved_mem *rmem;
	struct device_node *node;
	struct device *dev;
	ssize_t fw_size;
	void *mem_va;
	int ret;

	*mem_phys = 0;
	*mem_size = 0;

	dev = core->dev;
	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node) {
		dev_err(dev, "no memory-region specified\n");
		return -EINVAL;
	}

	rmem = of_reserved_mem_lookup(node);
	of_node_put(node);
	if (!rmem) {
		dev_err(dev, "failed to lookup reserved memory-region\n");
		return -EINVAL;
	}

	ret = request_firmware(&mdt, fwname, dev);
	if (ret < 0)
		return ret;

	fw_size = qcom_mdt_get_size(mdt);
	if (fw_size < 0) {
		ret = fw_size;
		goto err_release_fw;
	}

	*mem_phys = rmem->base;
	*mem_size = rmem->size;

	if (*mem_size < fw_size || fw_size > VENUS_FW_MEM_SIZE) {
		ret = -EINVAL;
		goto err_release_fw;
	}

	mem_va = memremap(*mem_phys, *mem_size, MEMREMAP_WC);
	if (!mem_va) {
		dev_err(dev, "unable to map memory region %pa size %#zx\n", mem_phys, *mem_size);
		ret = -ENOMEM;
		goto err_release_fw;
	}

	if (core->use_tz)
		ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID,
				    mem_va, *mem_phys, *mem_size, NULL);
	else
		ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID,
					    mem_va, *mem_phys, *mem_size, NULL);

	memunmap(mem_va);
err_release_fw:
	release_firmware(mdt);
	return ret;
}

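/*
 * Non-TZ boot: map the firmware region at IOVA 0 (VENUS_FW_START_ADDR) in
 * the firmware device's IOMMU domain and release the video core CPU from
 * reset.
 */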
static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
			    size_t mem_size)
{
	struct iommu_domain *iommu;
	struct device *dev;
	int ret;

	dev = core->fw.dev;
	if (!dev)
		return -EPROBE_DEFER;

	iommu = core->fw.iommu_domain;
	core->fw.mapped_mem_size = mem_size;

	ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
			IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV, GFP_KERNEL);
	if (ret) {
		dev_err(dev, "could not map video firmware region\n");
		return ret;
	}

	venus_reset_cpu(core);

	return 0;
}

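/*
 * Non-TZ shutdown: put the video core CPU back into reset and tear down the
 * IOMMU mapping created by venus_boot_no_tz().
 */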
static int venus_shutdown_no_tz(struct venus_core *core)
{
	const size_t mapped = core->fw.mapped_mem_size;
	struct iommu_domain *iommu;
	size_t unmapped;
	u32 reg;
	struct device *dev = core->fw.dev;
	void __iomem *wrapper_base = core->wrapper_base;
	void __iomem *wrapper_tz_base = core->wrapper_tz_base;

	if (IS_IRIS2_1(core)) {
		/* Assert the reset to XTSS */
		reg = readl(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
		reg |= WRAPPER_XTSS_SW_RESET_BIT;
		writel(reg, wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
	} else {
		/* Assert the reset to ARM9 */
		reg = readl(wrapper_base + WRAPPER_A9SS_SW_RESET);
		reg |= WRAPPER_A9SS_SW_RESET_BIT;
		writel(reg, wrapper_base + WRAPPER_A9SS_SW_RESET);
	}

	iommu = core->fw.iommu_domain;

	if (core->fw.mapped_mem_size && iommu) {
		unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped);

		if (unmapped != mapped)
			dev_err(dev, "failed to unmap firmware\n");
		else
			core->fw.mapped_mem_size = 0;
	}

	return 0;
}

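/*
 * Load and start the video firmware. The firmware name comes from the
 * optional "firmware-name" DT property, with the platform default as
 * fallback. Booting goes through TZ (PAS) or the non-TZ path, and on TZ
 * platforms with a content-protection region the secure virtual address
 * ranges are programmed afterwards.
 */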
int venus_boot(struct venus_core *core)
{
	struct device *dev = core->dev;
	const struct venus_resources *res = core->res;
	const char *fwpath = NULL;
	phys_addr_t mem_phys;
	size_t mem_size;
	int ret;

	if (!IS_ENABLED(CONFIG_QCOM_MDT_LOADER) ||
	    (core->use_tz && !qcom_scm_is_available()))
		return -EPROBE_DEFER;

	ret = of_property_read_string_index(dev->of_node, "firmware-name", 0,
					    &fwpath);
	if (ret)
		fwpath = core->res->fwname;

	ret = venus_load_fw(core, fwpath, &mem_phys, &mem_size);
	if (ret) {
		dev_err(dev, "fail to load video firmware\n");
		return -EINVAL;
	}

	core->fw.mem_size = mem_size;
	core->fw.mem_phys = mem_phys;

	if (core->use_tz)
		ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID);
	else
		ret = venus_boot_no_tz(core, mem_phys, mem_size);

	if (ret)
		return ret;

	if (core->use_tz && res->cp_size) {
		/*
		 * Clues for porting using downstream data:
		 * cp_start = 0
		 * cp_size = venus_ns/virtual-addr-pool[0] - yes, address and not size!
		 *   This works, as the non-secure context bank is placed
		 *   contiguously right after the Content Protection region.
		 *
		 * cp_nonpixel_start = venus_sec_non_pixel/virtual-addr-pool[0]
		 * cp_nonpixel_size = venus_sec_non_pixel/virtual-addr-pool[1]
		 */
		ret = qcom_scm_mem_protect_video_var(res->cp_start,
						     res->cp_size,
						     res->cp_nonpixel_start,
						     res->cp_nonpixel_size);
		if (ret) {
			qcom_scm_pas_shutdown(VENUS_PAS_ID);
			dev_err(dev, "set virtual address ranges fail (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

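/*
 * Stop the firmware, either through the PAS shutdown SCM call or by
 * resetting the core and unmapping its memory in venus_shutdown_no_tz().
 */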
int venus_shutdown(struct venus_core *core)
{
	int ret;

	if (core->use_tz)
		ret = qcom_scm_pas_shutdown(VENUS_PAS_ID);
	else
		ret = venus_shutdown_no_tz(core);

	return ret;
}

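/*
 * Select the boot method: without a "video-firmware" DT subnode the firmware
 * is managed by TrustZone. Otherwise register a child platform device for
 * that node and attach it to its own IOMMU paging domain, which
 * venus_boot_no_tz() later uses to map the firmware region.
 */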
int venus_firmware_init(struct venus_core *core)
{
	struct platform_device_info info;
	struct iommu_domain *iommu_dom;
	struct platform_device *pdev;
	struct device_node *np;
	int ret;

	np = of_get_child_by_name(core->dev->of_node, "video-firmware");
	if (!np) {
		core->use_tz = true;
		return 0;
	}

	memset(&info, 0, sizeof(info));
	info.fwnode = &np->fwnode;
	info.parent = core->dev;
	info.name = np->name;
	info.dma_mask = DMA_BIT_MASK(32);

	pdev = platform_device_register_full(&info);
	if (IS_ERR(pdev)) {
		of_node_put(np);
		return PTR_ERR(pdev);
	}

	pdev->dev.of_node = np;

	ret = of_dma_configure(&pdev->dev, np, true);
	if (ret) {
		dev_err(core->dev, "dma configure fail\n");
		goto err_unregister;
	}

	core->fw.dev = &pdev->dev;

	iommu_dom = iommu_paging_domain_alloc(core->fw.dev);
	if (IS_ERR(iommu_dom)) {
		dev_err(core->fw.dev, "Failed to allocate iommu domain\n");
		ret = PTR_ERR(iommu_dom);
		goto err_unregister;
	}

	ret = iommu_attach_device(iommu_dom, core->fw.dev);
	if (ret) {
		dev_err(core->fw.dev, "could not attach device\n");
		goto err_iommu_free;
	}

	core->fw.iommu_domain = iommu_dom;

	of_node_put(np);

	return 0;

err_iommu_free:
	iommu_domain_free(iommu_dom);
err_unregister:
	platform_device_unregister(pdev);
	of_node_put(np);
	return ret;
}

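/*
 * Undo venus_firmware_init() for the non-TZ case: detach and free the IOMMU
 * domain and unregister the firmware platform device.
 */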
void venus_firmware_deinit(struct venus_core *core)
{
	struct iommu_domain *iommu;

	if (!core->fw.dev)
		return;

	iommu = core->fw.iommu_domain;

	iommu_detach_device(iommu, core->fw.dev);

	if (core->fw.iommu_domain) {
		iommu_domain_free(iommu);
		core->fw.iommu_domain = NULL;
	}

	platform_device_unregister(to_platform_device(core->fw.dev));
}