xref: /linux/drivers/gpu/drm/nouveau/nvkm/falcon/base.c (revision 176fdcbddfd288408ce8571c1760ad618d962096)
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/top.h>

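/*
 * Report whether the falcon's RISC-V core is currently active.  Falcons
 * that do not implement the riscv_active hook always report false.
 */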
bool
nvkm_falcon_riscv_active(struct nvkm_falcon *falcon)
{
	if (!falcon->func->riscv_active)
		return false;

	return falcon->func->riscv_active(falcon);
}

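/* Select the DMA ops for the requested memory type, or NULL if unsupported. */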
static const struct nvkm_falcon_func_dma *
nvkm_falcon_dma(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
{
	switch (*mem_type) {
	case IMEM: return falcon->func->imem_dma;
	case DMEM: return falcon->func->dmem_dma;
	default:
		return NULL;
	}
}

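/**
 * nvkm_falcon_dma_wr - DMA an image into falcon IMEM or DMEM
 * @falcon: falcon to transfer to
 * @img: source image, used only for trace-level debug output
 * @dma_addr: DMA address of the source image
 * @dma_base: offset of the data within the source image
 * @mem_type: target memory (IMEM or DMEM)
 * @mem_base: destination offset within the target memory
 * @len: number of bytes to transfer, must be a non-zero multiple of 256
 * @sec: whether to perform a secure transfer
 */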
int
nvkm_falcon_dma_wr(struct nvkm_falcon *falcon, const u8 *img, u64 dma_addr, u32 dma_base,
		   enum nvkm_falcon_mem mem_type, u32 mem_base, int len, bool sec)
{
	const struct nvkm_falcon_func_dma *dma = nvkm_falcon_dma(falcon, &mem_type, &mem_base);
	const char *type = nvkm_falcon_mem(mem_type);
	const int dmalen = 256;
	u32 dma_start = 0;
	u32 dst, src, cmd;
	int ret, i;

	if (WARN_ON(!dma || !dma->xfer))
		return -EINVAL;

	if (mem_type == DMEM) {
		dma_start = dma_base;
		dma_addr += dma_base;
	}

	FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x (%010llx %08x)",
		 type, mem_base, len, dma_base, dma_addr - dma_base, dma_start);
	if (WARN_ON(!len || (len & (dmalen - 1))))
		return -EINVAL;

	ret = dma->init(falcon, dma_addr, dmalen, mem_type, sec, &cmd);
	if (ret)
		return ret;

	dst = mem_base;
	src = dma_base;
	if (len) {
		while (len >= dmalen) {
			dma->xfer(falcon, dst, src - dma_start, cmd);

			if (img && nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
				for (i = 0; i < dmalen; i += 4, mem_base += 4) {
					const int w = 8, x = (i / 4) % w;

					if (x == 0)
						printk(KERN_INFO "%s %08x <-", type, mem_base);
					printk(KERN_CONT " %08x", *(u32 *)(img + src + i));
					if (x == (w - 1) || ((i + 4) == dmalen))
						printk(KERN_CONT " <- %08x+%08x", dma_base,
						       src + i - dma_base - (x * 4));
					if (i == (7 * 4))
						printk(KERN_CONT " *");
				}
			}

			if (nvkm_msec(falcon->owner->device, 2000,
				if (dma->done(falcon))
					break;
			) < 0)
				return -ETIMEDOUT;

			src += dmalen;
			dst += dmalen;
			len -= dmalen;
		}
		WARN_ON(len);
	}

	return 0;
}

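/*
 * Select the PIO ops for the requested memory type.  DMEM accesses at or
 * above the EMEM base address are redirected to the EMEM ops, with
 * *mem_base rebased relative to the start of EMEM.
 */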
static const struct nvkm_falcon_func_pio *
nvkm_falcon_pio(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
{
	switch (*mem_type) {
	case IMEM:
		return falcon->func->imem_pio;
	case DMEM:
		if (!falcon->func->emem_addr || *mem_base < falcon->func->emem_addr)
			return falcon->func->dmem_pio;

		*mem_base -= falcon->func->emem_addr;
		fallthrough;
	case EMEM:
		return falcon->func->emem_pio;
	default:
		return NULL;
	}
}

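/**
 * nvkm_falcon_pio_rd - read falcon memory into a buffer via PIO
 * @falcon: falcon to read from
 * @port: PIO port to use
 * @mem_type: source memory (IMEM, DMEM or EMEM)
 * @mem_base: offset within the source memory
 * @img: destination buffer
 * @img_base: image offset, reported in debug output
 * @len: number of bytes to read, must be a non-zero multiple of pio->min
 */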
int
nvkm_falcon_pio_rd(struct nvkm_falcon *falcon, u8 port, enum nvkm_falcon_mem mem_type, u32 mem_base,
		   const u8 *img, u32 img_base, int len)
{
	const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
	const char *type = nvkm_falcon_mem(mem_type);
	int xfer_len;

	if (WARN_ON(!pio || !pio->rd))
		return -EINVAL;

	FLCN_DBG(falcon, "%s %08x -> %08x bytes at %08x", type, mem_base, len, img_base);
	if (WARN_ON(!len || (len & (pio->min - 1))))
		return -EINVAL;

	pio->rd_init(falcon, port, mem_base);
	do {
		xfer_len = min(len, pio->max);
		pio->rd(falcon, port, img, xfer_len);

		if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
			for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
				if (((img_base / 4) % 8) == 0)
					printk(KERN_INFO "%s %08x ->", type, mem_base);
				printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
			}
		}

		img += xfer_len;
		len -= xfer_len;
	} while (len);

	return 0;
}

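/**
 * nvkm_falcon_pio_wr - write a buffer into falcon memory via PIO
 * @falcon: falcon to write to
 * @img: source buffer
 * @img_base: image offset, reported in debug output
 * @port: PIO port to use
 * @mem_type: target memory (IMEM, DMEM or EMEM)
 * @mem_base: offset within the target memory
 * @len: number of bytes to write, must be a non-zero multiple of pio->min
 * @tag: IMEM tag of the first block, incremented for each block written
 * @sec: whether to write in secure mode
 */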
int
nvkm_falcon_pio_wr(struct nvkm_falcon *falcon, const u8 *img, u32 img_base, u8 port,
		   enum nvkm_falcon_mem mem_type, u32 mem_base, int len, u16 tag, bool sec)
{
	const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
	const char *type = nvkm_falcon_mem(mem_type);
	int xfer_len;

	if (WARN_ON(!pio || !pio->wr))
		return -EINVAL;

	FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x", type, mem_base, len, img_base);
	if (WARN_ON(!len || (len & (pio->min - 1))))
		return -EINVAL;

	pio->wr_init(falcon, port, sec, mem_base);
	do {
		xfer_len = min(len, pio->max);
		pio->wr(falcon, port, img, xfer_len, tag++);

		if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
			for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
				if (((img_base / 4) % 8) == 0)
					printk(KERN_INFO "%s %08x <-", type, mem_base);
				printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
				if ((img_base / 4) == 7 && mem_type == IMEM)
					printk(KERN_CONT " %04x", tag - 1);
			}
		}

		img += xfer_len;
		len -= xfer_len;
	} while (len);

	return 0;
}

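/**
 * nvkm_falcon_load_imem - load code into the falcon's IMEM
 * @falcon: falcon to load into
 * @data: code to load
 * @start: IMEM offset to start loading at
 * @size: number of bytes to load
 * @tag: IMEM tag of the first block
 * @port: IMEM port to use
 * @secure: whether to tag the code as secure (requires a secure-capable falcon)
 */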
void
nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u16 tag, u8 port, bool secure)
{
	if (secure && !falcon->secret) {
		nvkm_warn(falcon->user,
			  "writing with secure tag on a non-secure falcon!\n");
		return;
	}

	falcon->func->load_imem(falcon, data, start, size, tag, port,
				secure);
}

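/**
 * nvkm_falcon_load_dmem - load data into the falcon's DMEM
 * @falcon: falcon to load into
 * @data: data to load
 * @start: DMEM offset to start loading at
 * @size: number of bytes to load
 * @port: DMEM port to use
 *
 * Serialised against other callers by the falcon's dmem_mutex.
 */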
void
nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u8 port)
{
	mutex_lock(&falcon->dmem_mutex);

	falcon->func->load_dmem(falcon, data, start, size, port);

	mutex_unlock(&falcon->dmem_mutex);
}

void
nvkm_falcon_start(struct nvkm_falcon *falcon)
{
	falcon->func->start(falcon);
}

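/* Reset the falcon by disabling it and then enabling it again. */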
int
nvkm_falcon_reset(struct nvkm_falcon *falcon)
{
	int ret;

	ret = falcon->func->disable(falcon);
	if (WARN_ON(ret))
		return ret;

	return nvkm_falcon_enable(falcon);
}

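/*
 * First-use initialisation: resolve the falcon's register base from the
 * device topology if necessary, then read back its configuration (version,
 * secret level, port counts, code/data limits and debug mode) from the
 * hardware.
 */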
static int
nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
{
	const struct nvkm_falcon_func *func = falcon->func;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 reg;

	if (!falcon->addr) {
		falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
		if (WARN_ON(!falcon->addr))
			return -ENODEV;
	}

	reg = nvkm_falcon_rd32(falcon, 0x12c);
	falcon->version = reg & 0xf;
	falcon->secret = (reg >> 4) & 0x3;
	falcon->code.ports = (reg >> 8) & 0xf;
	falcon->data.ports = (reg >> 12) & 0xf;

	reg = nvkm_falcon_rd32(falcon, 0x108);
	falcon->code.limit = (reg & 0x1ff) << 8;
	falcon->data.limit = (reg & 0x3fe00) >> 1;

	if (func->debug) {
		u32 val = nvkm_falcon_rd32(falcon, func->debug);
		falcon->debug = (val >> 20) & 0x1;
	}

	return 0;
}

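/**
 * nvkm_falcon_put - release a falcon acquired with nvkm_falcon_get()
 * @falcon: falcon to release (may be NULL)
 * @user: subdev that acquired the falcon
 *
 * The falcon is only released if @user is its current owner.
 */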
void
nvkm_falcon_put(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
{
	if (unlikely(!falcon))
		return;

	mutex_lock(&falcon->mutex);
	if (falcon->user == user) {
		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
		falcon->user = NULL;
	}
	mutex_unlock(&falcon->mutex);
}

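/**
 * nvkm_falcon_get - acquire exclusive use of a falcon
 * @falcon: falcon to acquire
 * @user: subdev requesting the falcon
 *
 * Performs first-use initialisation if required.  Returns 0 on success,
 * or -EBUSY if the falcon is already owned by another subdev.
 */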
int
nvkm_falcon_get(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
{
	int ret = 0;

	mutex_lock(&falcon->mutex);
	if (falcon->user) {
		nvkm_error(user, "%s falcon already acquired by %s!\n",
			   falcon->name, falcon->user->name);
		mutex_unlock(&falcon->mutex);
		return -EBUSY;
	}

	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
	if (!falcon->oneinit)
		ret = nvkm_falcon_oneinit(falcon);
	falcon->user = user;
	mutex_unlock(&falcon->mutex);
	return ret;
}

void
nvkm_falcon_dtor(struct nvkm_falcon *falcon)
{
}

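/**
 * nvkm_falcon_ctor - initialise a falcon instance
 * @func: hardware-specific falcon functions
 * @subdev: subdev that owns the falcon
 * @name: name used in log messages
 * @addr: register base address, or 0 to resolve it from the device topology
 *        on first acquisition
 * @falcon: falcon structure to initialise
 */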
int
nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
		 struct nvkm_subdev *subdev, const char *name, u32 addr,
		 struct nvkm_falcon *falcon)
{
	falcon->func = func;
	falcon->owner = subdev;
	falcon->name = name;
	falcon->addr = addr;
	falcon->addr2 = func->addr2;
	mutex_init(&falcon->mutex);
	mutex_init(&falcon->dmem_mutex);
	return 0;
}