xref: /linux/drivers/gpu/drm/nouveau/nvkm/falcon/base.c (revision 2541626cfb794e57ba0575a6920826f591f7ced0)
1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 #include "priv.h"
23 
24 #include <subdev/mc.h>
25 #include <subdev/timer.h>
26 #include <subdev/top.h>
27 
28 static const struct nvkm_falcon_func_pio *
29 nvkm_falcon_pio(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
30 {
31 	switch (*mem_type) {
32 	case IMEM:
33 		return falcon->func->imem_pio;
34 	case DMEM:
35 		if (!falcon->func->emem_addr || *mem_base < falcon->func->emem_addr)
36 			return falcon->func->dmem_pio;
37 
38 		*mem_base -= falcon->func->emem_addr;
39 		fallthrough;
40 	case EMEM:
41 		return falcon->func->emem_pio;
42 	default:
43 		return NULL;
44 	}
45 }
46 
/* Read 'len' bytes from falcon memory into 'img' using the PIO accessors
 * for 'mem_type' through access port 'port'.
 *
 * 'len' must be non-zero and a multiple of the accessor's minimum unit
 * (pio->min); transfers are chunked to at most pio->max bytes per rd()
 * call.  'img_base' is only used for the initial debug message.
 *
 * Returns 0 on success, -EINVAL on a missing accessor or bad length.
 */
int
nvkm_falcon_pio_rd(struct nvkm_falcon *falcon, u8 port, enum nvkm_falcon_mem mem_type, u32 mem_base,
		   const u8 *img, u32 img_base, int len)
{
	/* nvkm_falcon_pio() may rewrite mem_base (DMEM->EMEM rebase). */
	const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
	const char *type = nvkm_falcon_mem(mem_type);
	int xfer_len;

	if (WARN_ON(!pio || !pio->rd))
		return -EINVAL;

	FLCN_DBG(falcon, "%s %08x -> %08x bytes at %08x", type, mem_base, len, img_base);
	if (WARN_ON(!len || (len & (pio->min - 1))))
		return -EINVAL;

	pio->rd_init(falcon, port, mem_base);
	do {
		xfer_len = min(len, pio->max);
		/* NOTE(review): 'img' is const-qualified but rd() fills it
		 * from HW -- presumably cast away inside the accessor.
		 */
		pio->rd(falcon, port, img, xfer_len);

		/* Trace-level hexdump of the chunk just read, eight 32-bit
		 * words per line.  img_base is deliberately reused as the
		 * dump cursor; its original value is dead past the FLCN_DBG
		 * above.  mem_base only advances here, so when tracing is
		 * off it simply stays stale (it has no further use).
		 */
		if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
			for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
				if (((img_base / 4) % 8) == 0)
					printk(KERN_INFO "%s %08x ->", type, mem_base);
				printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
			}
		}

		img += xfer_len;
		len -= xfer_len;
	} while (len);

	return 0;
}
81 
/* Write 'len' bytes from 'img' into falcon memory using the PIO accessors
 * for 'mem_type' through access port 'port'.
 *
 * 'len' must be non-zero and a multiple of pio->min; transfers are
 * chunked to at most pio->max bytes per wr() call.  'tag' is the IMEM
 * block tag for the first chunk and is incremented per chunk; 'sec'
 * requests a secure-tagged write.  'img_base' is only used for the
 * initial debug message.
 *
 * Returns 0 on success, -EINVAL on a missing accessor or bad length.
 */
int
nvkm_falcon_pio_wr(struct nvkm_falcon *falcon, const u8 *img, u32 img_base, u8 port,
		   enum nvkm_falcon_mem mem_type, u32 mem_base, int len, u16 tag, bool sec)
{
	/* nvkm_falcon_pio() may rewrite mem_base (DMEM->EMEM rebase). */
	const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
	const char *type = nvkm_falcon_mem(mem_type);
	int xfer_len;

	if (WARN_ON(!pio || !pio->wr))
		return -EINVAL;

	FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x", type, mem_base, len, img_base);
	if (WARN_ON(!len || (len & (pio->min - 1))))
		return -EINVAL;

	pio->wr_init(falcon, port, sec, mem_base);
	do {
		xfer_len = min(len, pio->max);
		/* tag++ here: 'tag - 1' below refers to the tag used for
		 * this chunk.
		 */
		pio->wr(falcon, port, img, xfer_len, tag++);

		/* Trace-level hexdump of the chunk just written, eight
		 * 32-bit words per line; for IMEM the chunk's tag is
		 * appended after the eighth word.  img_base is deliberately
		 * reused as the dump cursor (dead past the FLCN_DBG above).
		 */
		if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
			for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
				if (((img_base / 4) % 8) == 0)
					printk(KERN_INFO "%s %08x <-", type, mem_base);
				printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
				if ((img_base / 4) == 7 && mem_type == IMEM)
					printk(KERN_CONT " %04x", tag - 1);
			}
		}

		img += xfer_len;
		len -= xfer_len;
	} while (len);

	return 0;
}
118 
119 void
120 nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
121 		      u32 size, u16 tag, u8 port, bool secure)
122 {
123 	if (secure && !falcon->secret) {
124 		nvkm_warn(falcon->user,
125 			  "writing with secure tag on a non-secure falcon!\n");
126 		return;
127 	}
128 
129 	falcon->func->load_imem(falcon, data, start, size, tag, port,
130 				secure);
131 }
132 
133 void
134 nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
135 		      u32 size, u8 port)
136 {
137 	mutex_lock(&falcon->dmem_mutex);
138 
139 	falcon->func->load_dmem(falcon, data, start, size, port);
140 
141 	mutex_unlock(&falcon->dmem_mutex);
142 }
143 
/* Kick off execution of the falcon's loaded microcode via the
 * chip-specific start hook.
 */
void
nvkm_falcon_start(struct nvkm_falcon *falcon)
{
	falcon->func->start(falcon);
}
149 
150 int
151 nvkm_falcon_reset(struct nvkm_falcon *falcon)
152 {
153 	int ret;
154 
155 	ret = falcon->func->disable(falcon);
156 	if (WARN_ON(ret))
157 		return ret;
158 
159 	return nvkm_falcon_enable(falcon);
160 }
161 
162 static int
163 nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
164 {
165 	const struct nvkm_falcon_func *func = falcon->func;
166 	const struct nvkm_subdev *subdev = falcon->owner;
167 	u32 reg;
168 
169 	if (!falcon->addr) {
170 		falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
171 		if (WARN_ON(!falcon->addr))
172 			return -ENODEV;
173 	}
174 
175 	reg = nvkm_falcon_rd32(falcon, 0x12c);
176 	falcon->version = reg & 0xf;
177 	falcon->secret = (reg >> 4) & 0x3;
178 	falcon->code.ports = (reg >> 8) & 0xf;
179 	falcon->data.ports = (reg >> 12) & 0xf;
180 
181 	reg = nvkm_falcon_rd32(falcon, 0x108);
182 	falcon->code.limit = (reg & 0x1ff) << 8;
183 	falcon->data.limit = (reg & 0x3fe00) >> 1;
184 
185 	if (func->debug) {
186 		u32 val = nvkm_falcon_rd32(falcon, func->debug);
187 		falcon->debug = (val >> 20) & 0x1;
188 	}
189 
190 	return 0;
191 }
192 
/* Release ownership of a falcon previously acquired with
 * nvkm_falcon_get().
 *
 * A NULL falcon, or a release by a subdev that does not currently own
 * it, is silently ignored.
 */
void
nvkm_falcon_put(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
{
	if (unlikely(!falcon))
		return;

	mutex_lock(&falcon->mutex);
	if (falcon->user == user) {
		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
		falcon->user = NULL;
	}
	mutex_unlock(&falcon->mutex);
}
206 
207 int
208 nvkm_falcon_get(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
209 {
210 	int ret = 0;
211 
212 	mutex_lock(&falcon->mutex);
213 	if (falcon->user) {
214 		nvkm_error(user, "%s falcon already acquired by %s!\n",
215 			   falcon->name, falcon->user->name);
216 		mutex_unlock(&falcon->mutex);
217 		return -EBUSY;
218 	}
219 
220 	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
221 	if (!falcon->oneinit)
222 		ret = nvkm_falcon_oneinit(falcon);
223 	falcon->user = user;
224 	mutex_unlock(&falcon->mutex);
225 	return ret;
226 }
227 
/* Destructor counterpart to nvkm_falcon_ctor() -- currently nothing to
 * release.
 */
void
nvkm_falcon_dtor(struct nvkm_falcon *falcon)
{
}
232 
233 int
234 nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
235 		 struct nvkm_subdev *subdev, const char *name, u32 addr,
236 		 struct nvkm_falcon *falcon)
237 {
238 	falcon->func = func;
239 	falcon->owner = subdev;
240 	falcon->name = name;
241 	falcon->addr = addr;
242 	mutex_init(&falcon->mutex);
243 	mutex_init(&falcon->dmem_mutex);
244 	return 0;
245 }
246