/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/device.h>
#include <core/firmware.h>

#include <subdev/fb.h>
#include <subdev/mmu.h>

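/*
 * nvkm_firmware_load_name - request a firmware file by "<base><name>"
 *
 * Concatenates @base and @name into a single path and hands it to
 * nvkm_firmware_get(), which prepends the per-chip "nvidia/<chip>/"
 * directory and appends the ".bin" (and optional "-<ver>") suffix.
 * Returns 0 on success or a negative error code.
 */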
int
nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base,
			const char *name, int ver, const struct firmware **pfw)
{
	char path[64];
	int ret;

	snprintf(path, sizeof(path), "%s%s", base, name);
	ret = nvkm_firmware_get(subdev, path, ver, pfw);
	if (ret < 0)
		return ret;

	return 0;
}

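/*
 * nvkm_firmware_load_blob - load a firmware file into an nvkm_blob
 *
 * Like nvkm_firmware_load_name(), but copies the image into a
 * kernel-allocated buffer and releases the struct firmware immediately,
 * leaving the caller with only @blob to clean up.
 */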
int
nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base,
			const char *name, int ver, struct nvkm_blob *blob)
{
	const struct firmware *fw;
	int ret;

	ret = nvkm_firmware_load_name(subdev, base, name, ver, &fw);
	if (ret == 0) {
		blob->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
		blob->size = fw->size;
		nvkm_firmware_put(fw);
		if (!blob->data)
			return -ENOMEM;
	}

	return ret;
}

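/*
 * A minimal caller sketch (the base/name strings and surrounding context
 * are illustrative only; error handling is elided):
 *
 *	struct nvkm_blob blob = {};
 *	int ret = nvkm_firmware_load_blob(subdev, "acr/", "ucode_unload",
 *					  ver, &blob);
 *	if (ret)
 *		return ret;
 *	// ... use blob.data / blob.size ...
 *	// free blob.data when finished (e.g. via nvkm_blob_dtor())
 */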
/**
 * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
 * @subdev:	subdevice that will use that firmware
 * @fwname:	name of firmware file to load
 * @ver:	firmware version to load (appended as "-<ver>" when non-zero)
 * @fw:		firmware structure to load to
 *
 * Use this function to load firmware files in the form nvidia/chip/fwname.bin.
 * Firmware files released by NVIDIA will always follow this format.
 * Returns 0 on success, or -ENOENT (without a warning) if the file is missing.
 */
int
nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver,
		  const struct firmware **fw)
{
	struct nvkm_device *device = subdev->device;
	char f[64];
	char cname[16];
	int i;

	/* Convert device name to lowercase */
	strscpy(cname, device->chip->name, sizeof(cname));
	i = strlen(cname);
	while (i) {
		--i;
		cname[i] = tolower(cname[i]);
	}

	if (ver != 0)
		snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, ver);
	else
		snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);

	if (!firmware_request_nowarn(fw, f, device->dev)) {
		nvkm_debug(subdev, "firmware \"%s\" loaded - %zu byte(s)\n",
			   f, (*fw)->size);
		return 0;
	}

	nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
	return -ENOENT;
}

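/*
 * For example (chip and firmware names chosen purely for illustration),
 * requesting "gr/sw_nonctx" on a chip named "GP102" with ver == 0 resolves
 * to "nvidia/gp102/gr/sw_nonctx.bin", and with ver == 2 to
 * "nvidia/gp102/gr/sw_nonctx-2.bin".
 */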
/*
 * nvkm_firmware_put - release firmware loaded with nvkm_firmware_get
 */
void
nvkm_firmware_put(const struct firmware *fw)
{
	release_firmware(fw);
}

#define nvkm_firmware_mem(p) container_of((p), struct nvkm_firmware, mem.memory)

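/*
 * The functions below back a struct nvkm_firmware image with a struct
 * nvkm_memory object (fw->mem.memory) so the image can be mapped into a
 * GPU virtual address space via nvkm_vmm_map().  Only the DMA and SGT
 * image types carry scatterlists; RAM-type images cannot be mapped.
 */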
static struct scatterlist *
nvkm_firmware_mem_sgl(struct nvkm_memory *memory)
{
	struct nvkm_firmware *fw = nvkm_firmware_mem(memory);

	switch (fw->func->type) {
	case NVKM_FIRMWARE_IMG_DMA: return &fw->mem.sgl;
	case NVKM_FIRMWARE_IMG_SGT: return  fw->mem.sgt.sgl;
	default:
		WARN_ON(1);
		break;
	}

	return NULL;
}

static int
nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		      struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct nvkm_firmware *fw = nvkm_firmware_mem(memory);
	struct nvkm_vmm_map map = {
		.memory = &fw->mem.memory,
		.offset = offset,
		.sgl = nvkm_firmware_mem_sgl(memory),
	};

	if (!map.sgl)
		return -ENOSYS;

	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

static u64
nvkm_firmware_mem_size(struct nvkm_memory *memory)
{
	struct scatterlist *sgl = nvkm_firmware_mem_sgl(memory);

	return sgl ? sg_dma_len(sgl) : 0;
}

static u64
nvkm_firmware_mem_addr(struct nvkm_memory *memory)
{
	BUG_ON(nvkm_firmware_mem(memory)->func->type != NVKM_FIRMWARE_IMG_DMA);
	return nvkm_firmware_mem(memory)->phys;
}

static u8
nvkm_firmware_mem_page(struct nvkm_memory *memory)
{
	return PAGE_SHIFT;
}

static enum nvkm_memory_target
nvkm_firmware_mem_target(struct nvkm_memory *memory)
{
	if (nvkm_firmware_mem(memory)->device->func->tegra)
		return NVKM_MEM_TARGET_NCOH;

	return NVKM_MEM_TARGET_HOST;
}

static void *
nvkm_firmware_mem_dtor(struct nvkm_memory *memory)
{
	return NULL;
}

static const struct nvkm_memory_func
nvkm_firmware_mem = {
	.dtor = nvkm_firmware_mem_dtor,
	.target = nvkm_firmware_mem_target,
	.page = nvkm_firmware_mem_page,
	.addr = nvkm_firmware_mem_addr,
	.size = nvkm_firmware_mem_size,
	.map = nvkm_firmware_mem_map,
};

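/*
 * nvkm_firmware_dtor - undo nvkm_firmware_ctor()
 *
 * Releases the image according to how it was allocated: kfree() for RAM
 * images, dma_free_noncoherent() for DMA images, and unmap/free of the
 * scatter-gather table plus vfree() for SGT images.  Safe to call on a
 * firmware whose image was never allocated.
 */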
void
nvkm_firmware_dtor(struct nvkm_firmware *fw)
{
	struct nvkm_memory *memory = &fw->mem.memory;

	if (!fw->img)
		return;

	switch (fw->func->type) {
	case NVKM_FIRMWARE_IMG_RAM:
		kfree(fw->img);
		break;
	case NVKM_FIRMWARE_IMG_DMA:
		nvkm_memory_unref(&memory);
		dma_free_noncoherent(fw->device->dev, sg_dma_len(&fw->mem.sgl),
				     fw->img, fw->phys, DMA_TO_DEVICE);
		break;
	case NVKM_FIRMWARE_IMG_SGT:
		nvkm_memory_unref(&memory);
		dma_unmap_sgtable(fw->device->dev, &fw->mem.sgt, DMA_TO_DEVICE, 0);
		sg_free_table(&fw->mem.sgt);
		vfree(fw->img);
		break;
	default:
		WARN_ON(1);
		break;
	}

	fw->img = NULL;
}

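/*
 * nvkm_firmware_ctor - copy a firmware image into device-usable memory
 *
 * Copies @len bytes from @src into a new image whose backing depends on
 * fw->func->type:
 *
 *   NVKM_FIRMWARE_IMG_RAM: plain kernel memory (kmemdup), not mappable.
 *   NVKM_FIRMWARE_IMG_DMA: a single physically-contiguous non-coherent
 *                          DMA allocation, described by a one-entry sgl.
 *   NVKM_FIRMWARE_IMG_SGT: vmalloc memory mapped for DMA through a
 *                          scatter-gather table, one page per entry.
 *
 * The allocation is padded up to PAGE_SIZE for the DMA and SGT types.  On
 * success the embedded nvkm_memory object is initialised; callers clean up
 * with nvkm_firmware_dtor().
 */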
int
nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
		   struct nvkm_device *device, const void *src, int len, struct nvkm_firmware *fw)
{
	fw->func = func;
	fw->name = name;
	fw->device = device;
	fw->len = len;

	switch (fw->func->type) {
	case NVKM_FIRMWARE_IMG_RAM:
		fw->img = kmemdup(src, fw->len, GFP_KERNEL);
		break;
	case NVKM_FIRMWARE_IMG_DMA: {
		dma_addr_t addr;

		len = ALIGN(fw->len, PAGE_SIZE);

		fw->img = dma_alloc_noncoherent(fw->device->dev,
						len, &addr,
						DMA_TO_DEVICE,
						GFP_KERNEL);
		if (fw->img) {
			memcpy(fw->img, src, fw->len);
			fw->phys = addr;
		}

		sg_init_one(&fw->mem.sgl, fw->img, len);
		sg_dma_address(&fw->mem.sgl) = fw->phys;
		sg_dma_len(&fw->mem.sgl) = len;
	}
		break;
	case NVKM_FIRMWARE_IMG_SGT:
		len = ALIGN(fw->len, PAGE_SIZE);

		fw->img = vmalloc(len);
		if (fw->img) {
			int pages = len >> PAGE_SHIFT;
			int ret = 0;

			memcpy(fw->img, src, fw->len);

			ret = sg_alloc_table(&fw->mem.sgt, pages, GFP_KERNEL);
			if (ret == 0) {
				struct scatterlist *sgl;
				u8 *data = fw->img;
				int i;

				for_each_sgtable_sg(&fw->mem.sgt, sgl, i) {
					struct page *page = vmalloc_to_page(data);

					if (!page) {
						ret = -EFAULT;
						break;
					}

					sg_set_page(sgl, page, PAGE_SIZE, 0);
					data += PAGE_SIZE;
				}

				if (ret == 0) {
					ret = dma_map_sgtable(fw->device->dev, &fw->mem.sgt,
							      DMA_TO_DEVICE, 0);
				}

				if (ret)
					sg_free_table(&fw->mem.sgt);
			}

			if (ret) {
				vfree(fw->img);
				fw->img = NULL;
			}
		}
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	if (!fw->img)
		return -ENOMEM;

	nvkm_memory_ctor(&nvkm_firmware_mem, &fw->mem.memory);
	return 0;
}