xref: /linux/drivers/gpu/drm/nouveau/nvkm/falcon/base.c (revision 0e44c21708761977dcbea9b846b51a6fb684907a)
1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 #include "priv.h"
23 
24 #include <subdev/mc.h>
25 #include <subdev/top.h>
26 
27 static const struct nvkm_falcon_func_pio *
28 nvkm_falcon_pio(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
29 {
30 	switch (*mem_type) {
31 	case IMEM:
32 		return falcon->func->imem_pio;
33 	case DMEM:
34 		if (!falcon->func->emem_addr || *mem_base < falcon->func->emem_addr)
35 			return falcon->func->dmem_pio;
36 
37 		*mem_base -= falcon->func->emem_addr;
38 		fallthrough;
39 	default:
40 		return NULL;
41 	}
42 }
43 
int
nvkm_falcon_pio_wr(struct nvkm_falcon *falcon, const u8 *img, u32 img_base, u8 port,
		   enum nvkm_falcon_mem mem_type, u32 mem_base, int len, u16 tag, bool sec)
{
	/* Upload 'len' bytes of 'img' into the falcon's IMEM/DMEM/EMEM via
	 * port-mapped I/O, in chunks of at most pio->max bytes.  'tag' is
	 * incremented per chunk (IMEM blocks are tagged; presumably ignored
	 * for DMEM/EMEM — confirm against the per-chip pio->wr backends).
	 * Returns 0 on success, -EINVAL on an unsupported aperture or a
	 * length that is zero / not a multiple of pio->min.
	 */
	const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
	const char *type = nvkm_falcon_mem(mem_type);
	int xfer_len;

	if (WARN_ON(!pio || !pio->wr))
		return -EINVAL;

	FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x", type, mem_base, len, img_base);
	/* Transfers must be non-empty and aligned to the PIO minimum unit. */
	if (WARN_ON(!len || (len & (pio->min - 1))))
		return -EINVAL;

	pio->wr_init(falcon, port, sec, mem_base);
	do {
		xfer_len = min(len, pio->max);
		pio->wr(falcon, port, img, xfer_len, tag++);

		/* Trace path only: dump the words just written, eight per
		 * line.  NOTE: img_base is reused here as a byte offset
		 * within the current chunk; its original value was consumed
		 * by the FLCN_DBG above.
		 */
		if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
			for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
				if (((img_base / 4) % 8) == 0)
					printk(KERN_INFO "%s %08x <-", type, mem_base);
				printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
				/* For IMEM, append the tag of the block just
				 * written (tag was already post-incremented).
				 */
				if ((img_base / 4) == 7 && mem_type == IMEM)
					printk(KERN_CONT " %04x", tag - 1);
			}
		}

		img += xfer_len;
		len -= xfer_len;
	} while (len);

	return 0;
}
80 
81 void
82 nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
83 		      u32 size, u16 tag, u8 port, bool secure)
84 {
85 	if (secure && !falcon->secret) {
86 		nvkm_warn(falcon->user,
87 			  "writing with secure tag on a non-secure falcon!\n");
88 		return;
89 	}
90 
91 	falcon->func->load_imem(falcon, data, start, size, tag, port,
92 				secure);
93 }
94 
95 void
96 nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
97 		      u32 size, u8 port)
98 {
99 	mutex_lock(&falcon->dmem_mutex);
100 
101 	falcon->func->load_dmem(falcon, data, start, size, port);
102 
103 	mutex_unlock(&falcon->dmem_mutex);
104 }
105 
106 void
107 nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
108 		      void *data)
109 {
110 	mutex_lock(&falcon->dmem_mutex);
111 
112 	falcon->func->read_dmem(falcon, start, size, port, data);
113 
114 	mutex_unlock(&falcon->dmem_mutex);
115 }
116 
117 void
118 nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
119 {
120 	if (!falcon->func->bind_context) {
121 		nvkm_error(falcon->user,
122 			   "Context binding not supported on this falcon!\n");
123 		return;
124 	}
125 
126 	falcon->func->bind_context(falcon, inst);
127 }
128 
129 void
130 nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
131 {
132 	falcon->func->set_start_addr(falcon, start_addr);
133 }
134 
135 void
136 nvkm_falcon_start(struct nvkm_falcon *falcon)
137 {
138 	falcon->func->start(falcon);
139 }
140 
141 int
142 nvkm_falcon_reset(struct nvkm_falcon *falcon)
143 {
144 	int ret;
145 
146 	ret = falcon->func->disable(falcon);
147 	if (WARN_ON(ret))
148 		return ret;
149 
150 	return nvkm_falcon_enable(falcon);
151 }
152 
153 int
154 nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
155 {
156 	return falcon->func->wait_for_halt(falcon, ms);
157 }
158 
159 int
160 nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
161 {
162 	return falcon->func->clear_interrupt(falcon, mask);
163 }
164 
static int
nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
{
	/* One-time discovery of the falcon's MMIO base and capabilities,
	 * performed lazily on first acquisition (see nvkm_falcon_get()).
	 * Returns 0 on success, -ENODEV if the unit's address cannot be
	 * determined.
	 */
	const struct nvkm_falcon_func *func = falcon->func;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 reg;

	if (!falcon->addr) {
		/* No address supplied at construction: look it up in the
		 * device topology tables by subdev type/instance.
		 */
		falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
		if (WARN_ON(!falcon->addr))
			return -ENODEV;
	}

	/* Decode capabilities from the 0x12c register: version, secret
	 * (secure-boot) level, and IMEM/DMEM port counts.  Bit layout
	 * presumably matches the falcon HWCFG spec — confirm against the
	 * hardware documentation.
	 */
	reg = nvkm_falcon_rd32(falcon, 0x12c);
	falcon->version = reg & 0xf;
	falcon->secret = (reg >> 4) & 0x3;
	falcon->code.ports = (reg >> 8) & 0xf;
	falcon->data.ports = (reg >> 12) & 0xf;

	/* 0x108 holds the IMEM/DMEM size fields; convert to byte limits. */
	reg = nvkm_falcon_rd32(falcon, 0x108);
	falcon->code.limit = (reg & 0x1ff) << 8;
	falcon->data.limit = (reg & 0x3fe00) >> 1;

	if (func->debug) {
		/* Chip provides a debug-mode register; cache the flag. */
		u32 val = nvkm_falcon_rd32(falcon, func->debug);
		falcon->debug = (val >> 20) & 0x1;
	}

	return 0;
}
195 
196 void
197 nvkm_falcon_put(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
198 {
199 	if (unlikely(!falcon))
200 		return;
201 
202 	mutex_lock(&falcon->mutex);
203 	if (falcon->user == user) {
204 		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
205 		falcon->user = NULL;
206 	}
207 	mutex_unlock(&falcon->mutex);
208 }
209 
210 int
211 nvkm_falcon_get(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
212 {
213 	int ret = 0;
214 
215 	mutex_lock(&falcon->mutex);
216 	if (falcon->user) {
217 		nvkm_error(user, "%s falcon already acquired by %s!\n",
218 			   falcon->name, falcon->user->name);
219 		mutex_unlock(&falcon->mutex);
220 		return -EBUSY;
221 	}
222 
223 	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
224 	if (!falcon->oneinit)
225 		ret = nvkm_falcon_oneinit(falcon);
226 	falcon->user = user;
227 	mutex_unlock(&falcon->mutex);
228 	return ret;
229 }
230 
void
nvkm_falcon_dtor(struct nvkm_falcon *falcon)
{
	/* Intentionally empty: the ctor allocates nothing that needs
	 * explicit teardown.  Kept as a hook for symmetry with
	 * nvkm_falcon_ctor() and for implementations that may need it.
	 */
}
235 
236 int
237 nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
238 		 struct nvkm_subdev *subdev, const char *name, u32 addr,
239 		 struct nvkm_falcon *falcon)
240 {
241 	falcon->func = func;
242 	falcon->owner = subdev;
243 	falcon->name = name;
244 	falcon->addr = addr;
245 	mutex_init(&falcon->mutex);
246 	mutex_init(&falcon->dmem_mutex);
247 	return 0;
248 }
249