/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/acr.h>
#include <subdev/timer.h>

#include <nvfw/flcn.h>
#include <nvfw/sec2.h>

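/*
 * Fallback for when no SEC2 firmware image can be found: warn, but
 * return success so the device can still initialise without SEC2.
 */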
int
gp102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
		const struct nvkm_sec2_fwif *fwif)
{
	nvkm_warn(&sec2->engine.subdev, "firmware unavailable\n");
	return 0;
}

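/*
 * Completion callback for an ACR_BOOTSTRAP_FALCON command, checking
 * the error code the SEC2 firmware reports for the requested falcon.
 */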
static int
gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nvfw_falcon_msg *hdr)
{
	struct nv_sec2_acr_bootstrap_falcon_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	struct nvkm_subdev *subdev = priv;
	const char *name = nvkm_acr_lsf_id(msg->falcon_id);

	if (msg->error_code) {
		nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for falcon %d [%s]: %08x\n",
			   msg->falcon_id, name, msg->error_code);
		return -EINVAL;
	}

	nvkm_debug(subdev, "%s booted\n", name);
	return 0;
}

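/*
 * Ask the SEC2 firmware's ACR unit to reset and bootstrap the
 * low-secure falcon identified by @id, waiting up to a second for
 * the reply.
 */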
static int
gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
			        enum nvkm_acr_lsf_id id)
{
	struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon);
	struct nv_sec2_acr_bootstrap_falcon_cmd cmd = {
		.cmd.hdr.unit_id = sec2->func->unit_acr,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON,
		.flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
		.falcon_id = id,
	};

	return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,
				     gp102_sec2_acr_bootstrap_falcon_callback,
				     &sec2->engine.subdev,
				     msecs_to_jiffies(1000));
}

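/*
 * Shift the DMA base addresses in a bootloader descriptor already
 * resident in the WPR image by @adjust, after the image has been
 * relocated.
 */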
static void
gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct loader_config_v1 hdr;
	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	hdr.code_dma_base = hdr.code_dma_base + adjust;
	hdr.data_dma_base = hdr.data_dma_base + adjust;
	hdr.overlay_dma_base = hdr.overlay_dma_base + adjust;
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
	loader_config_v1_dump(&acr->subdev, &hdr);
}

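/*
 * Write the loader_config_v1 bootloader descriptor for the SEC2 LS
 * firmware into the WPR image at offset @bld.  The single argument
 * passed via argc/argv is the falcon's EMEM address.
 */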
static void
gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld,
			 struct nvkm_acr_lsfw *lsfw)
{
	const struct loader_config_v1 hdr = {
		.dma_idx = FALCON_SEC2_DMAIDX_UCODE,
		.code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
		.code_size_total = lsfw->app_size,
		.code_size_to_load = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
				 lsfw->app_resident_data_offset,
		.data_size = lsfw->app_resident_data_size,
		.overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset,
		.argc = 1,
		.argv = lsfw->falcon->func->emem_addr,
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

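/*
 * ACR support for the original (version 0) SEC2 firmware, which uses
 * loader_config_v1 bootloader descriptors.
 */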
static const struct nvkm_acr_lsf_func
gp102_sec2_acr_0 = {
	.bld_size = sizeof(struct loader_config_v1),
	.bld_write = gp102_sec2_acr_bld_write,
	.bld_patch = gp102_sec2_acr_bld_patch,
	.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
			     BIT_ULL(NVKM_ACR_LSF_GPCCS) |
			     BIT_ULL(NVKM_ACR_LSF_SEC2),
	.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
};

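/*
 * Receive and validate the INIT message the SEC2 firmware sends once
 * running, and initialise the host-side command and message queues
 * from the queue parameters it carries.
 */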
int
gp102_sec2_initmsg(struct nvkm_sec2 *sec2)
{
	struct nv_sec2_init_msg msg;
	int ret, i;

	ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg));
	if (ret)
		return ret;

	if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT ||
	    msg.msg_type != NV_SEC2_INIT_MSG_INIT)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) {
		if (msg.queue_info[i].id == NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) {
			nvkm_falcon_msgq_init(sec2->msgq,
					      msg.queue_info[i].index,
					      msg.queue_info[i].offset,
					      msg.queue_info[i].size);
		} else {
			nvkm_falcon_cmdq_init(sec2->cmdq,
					      msg.queue_info[i].index,
					      msg.queue_info[i].offset,
					      msg.queue_info[i].size);
		}
	}

	return 0;
}

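/*
 * SEC2 interrupt handler.  Bit 0x00000040 is handled as message-queue
 * traffic: the first such interrupt is expected to carry the firmware's
 * INIT message, later ones normal command replies.  Bit 0x00000010 is
 * handled as a falcon halt, which is only an error while the engine is
 * expected to be running.
 */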
irqreturn_t
gp102_sec2_intr(struct nvkm_inth *inth)
{
	struct nvkm_sec2 *sec2 = container_of(inth, typeof(*sec2), engine.subdev.inth);
	struct nvkm_subdev *subdev = &sec2->engine.subdev;
	struct nvkm_falcon *falcon = &sec2->falcon;
	u32 disp = nvkm_falcon_rd32(falcon, 0x01c);
	u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16);

	if (intr & 0x00000040) {
		if (unlikely(atomic_read(&sec2->initmsg) == 0)) {
			int ret = sec2->func->initmsg(sec2);

			if (ret)
				nvkm_error(subdev, "error parsing init message: %d\n", ret);

			atomic_set(&sec2->initmsg, ret ?: 1);
		}

		if (atomic_read(&sec2->initmsg) > 0) {
			if (!nvkm_falcon_msgq_empty(sec2->msgq))
				nvkm_falcon_msgq_recv(sec2->msgq);
		}

		nvkm_falcon_wr32(falcon, 0x004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr & 0x00000010) {
		if (atomic_read(&sec2->running)) {
			FLCN_ERR(falcon, "halted");
			gm200_flcn_tracepc(falcon);
		}

		nvkm_falcon_wr32(falcon, 0x004, 0x00000010);
		intr &= ~0x00000010;
	}

	if (intr) {
		nvkm_error(subdev, "unhandled intr %08x\n", intr);
		nvkm_falcon_wr32(falcon, 0x004, intr);
	}

	return IRQ_HANDLED;
}

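/*
 * Falcon hardware operations for the GP102 SEC2, which exposes EMEM
 * at falcon address 0x01000000.
 */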
static const struct nvkm_falcon_func
gp102_sec2_flcn = {
	.disable = gm200_flcn_disable,
	.enable = gm200_flcn_enable,
	.reset_pmc = true,
	.reset_eng = gp102_flcn_reset_eng,
	.reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
	.debug = 0x408,
	.bind_inst = gm200_flcn_bind_inst,
	.bind_stat = gm200_flcn_bind_stat,
	.bind_intr = true,
	.imem_pio = &gm200_flcn_imem_pio,
	.dmem_pio = &gm200_flcn_dmem_pio,
	.emem_addr = 0x01000000,
	.emem_pio = &gp102_flcn_emem_pio,
	.start = nvkm_falcon_v1_start,
	.cmdq = { 0xa00, 0xa04, 8 },
	.msgq = { 0xa30, 0xa34, 8 },
};

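/* Engine-level SEC2 hooks and firmware unit IDs for GP102. */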
const struct nvkm_sec2_func
gp102_sec2 = {
	.flcn = &gp102_sec2_flcn,
	.unit_unload = NV_SEC2_UNIT_UNLOAD,
	.unit_acr = NV_SEC2_UNIT_ACR,
	.intr = gp102_sec2_intr,
	.initmsg = gp102_sec2_initmsg,
};

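/* Unsuffixed firmware images, for the version 0 entry in gp102_sec2_fwif. */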
MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");

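/*
 * As gp102_sec2_acr_bld_patch(), but for the flcn_bl_dmem_desc_v2
 * descriptor used by the version 1 firmware.
 */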
void
gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct flcn_bl_dmem_desc_v2 hdr;
	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	hdr.code_dma_base = hdr.code_dma_base + adjust;
	hdr.data_dma_base = hdr.data_dma_base + adjust;
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
	flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
}

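/*
 * As gp102_sec2_acr_bld_write(), but emitting the flcn_bl_dmem_desc_v2
 * descriptor used by the version 1 firmware.
 */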
void
gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld,
			   struct nvkm_acr_lsfw *lsfw)
{
	const struct flcn_bl_dmem_desc_v2 hdr = {
		.ctx_dma = FALCON_SEC2_DMAIDX_UCODE,
		.code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
		.non_sec_code_off = lsfw->app_resident_code_offset,
		.non_sec_code_size = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
				 lsfw->app_resident_data_offset,
		.data_size = lsfw->app_resident_data_size,
		.argc = 1,
		.argv = lsfw->falcon->func->emem_addr,
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

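/*
 * ACR support for the version 1 SEC2 firmware, which uses
 * flcn_bl_dmem_desc_v2 bootloader descriptors.
 */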
const struct nvkm_acr_lsf_func
gp102_sec2_acr_1 = {
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
	.bld_write = gp102_sec2_acr_bld_write_1,
	.bld_patch = gp102_sec2_acr_bld_patch_1,
	.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
			     BIT_ULL(NVKM_ACR_LSF_GPCCS) |
			     BIT_ULL(NVKM_ACR_LSF_SEC2),
	.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
};

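/*
 * Load the SEC2 LS firmware (image, descriptor and signature) and
 * register it with the ACR under the NVKM_ACR_LSF_SEC2 ID.
 */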
int
gp102_sec2_load(struct nvkm_sec2 *sec2, int ver,
		const struct nvkm_sec2_fwif *fwif)
{
	return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev,
						    &sec2->falcon,
						    NVKM_ACR_LSF_SEC2, "sec2/",
						    ver, fwif->acr);
}

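/* "-1"-suffixed firmware images, for the version 1 entry in gp102_sec2_fwif. */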
MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");

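/*
 * Firmware variants, tried in order: version 1, then version 0, then
 * the no-firmware fallback.
 */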
static const struct nvkm_sec2_fwif
gp102_sec2_fwif[] = {
	{  1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
	{  0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
	{ -1, gp102_sec2_nofw, &gp102_sec2 },
	{}
};

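/* Create a GP102 SEC2 engine instance. */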
int
gp102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_sec2 **psec2)
{
	return nvkm_sec2_new_(gp102_sec2_fwif, device, type, inst, 0, psec2);
}