xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c (revision 2fc71a0566f63ac3cd43d7cf2d5efbbab6293c5f)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"

#include <core/gpuobj.h>
#include <subdev/mc.h>

#include <nvif/cl0080.h>
#include <nvif/unpack.h>

void
nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
{
	unsigned long flags;
	if (WARN_ON(!fifo->func->recover_chan))
		return;
	spin_lock_irqsave(&fifo->lock, flags);
	fifo->func->recover_chan(fifo, chid);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

void
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
	return fifo->func->pause(fifo, flags);
}

void
nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
{
	return fifo->func->start(fifo, flags);
}

void
nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
{
	return fifo->func->mmu_fault->recover(fifo, info);
}

void
nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
		   struct nvkm_fifo_chan **pchan)
{
	struct nvkm_fifo_chan *chan = *pchan;
	if (likely(chan)) {
		*pchan = NULL;
		spin_unlock_irqrestore(&fifo->lock, flags);
	}
}

/* Caller must hold fifo->lock (see nvkm_fifo_chan_inst()). */
struct nvkm_fifo_chan *
nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
{
	struct nvkm_fifo_chan *chan;
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->inst->addr == inst) {
			/* Move the match to the head of the list, so that
			 * recently looked-up channels are found quickly.
			 */
			list_del(&chan->head);
			list_add(&chan->head, &fifo->chan);
			return chan;
		}
	}
	return NULL;
}

struct nvkm_fifo_chan *
nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
		*rflags = flags;
		return chan;
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->chid == chid) {
			list_del(&chan->head);
			list_add(&chan->head, &fifo->chan);
			*rflags = flags;
			return chan;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}
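
/*
 * Illustrative usage sketch (not code from this file): the lookup helpers
 * above return with fifo->lock held and the saved IRQ state handed back
 * through *rflags, and nvkm_fifo_chan_put() is the matching release:
 *
 *	unsigned long flags;
 *	struct nvkm_fifo_chan *chan;
 *
 *	chan = nvkm_fifo_chan_inst(fifo, inst, &flags);
 *	if (chan) {
 *		... inspect/report the faulting channel ...
 *		nvkm_fifo_chan_put(fifo, flags, &chan);
 *	}
 *
 * The put clears the caller's pointer and only drops the lock when the
 * pointer was non-NULL, so it is also safe to call after a failed lookup
 * (which already released the lock itself).
 */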

void
nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
{
	nvkm_event_ntfy(&fifo->kevent, chid, NVKM_FIFO_EVENT_KILLED);
}

static const struct nvkm_event_func
nvkm_fifo_kevent_func = {
};

static void
nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_fini(fifo);
}

static void
nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_init(fifo);
}

static const struct nvkm_event_func
nvkm_fifo_uevent_func = {
	.init = nvkm_fifo_uevent_init,
	.fini = nvkm_fifo_uevent_fini,
};

void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
	nvkm_event_ntfy(&fifo->uevent, 0, NVKM_FIFO_EVENT_NON_STALL_INTR);
}
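
/*
 * Event plumbing notes (a summary, not new behaviour): nvkm_fifo_kevent()
 * broadcasts a KILLED notification for a single channel id, while
 * nvkm_fifo_uevent() broadcasts the non-stall interrupt to every listener.
 * As far as the nvkm_event core is concerned, the .init/.fini hooks above
 * are called when the first notify is armed and the last one disarmed, so
 * the chip-specific uevent_init()/uevent_fini() callbacks only keep the
 * hardware interrupt enabled while someone is actually listening.
 */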

static int
nvkm_fifo_class_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		    void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);

	if (oclass->engn == &fifo->func->chan.user)
		return nvkm_uchan_new(fifo, NULL, oclass, argv, argc, pobject);

	WARN_ON(1);
	return -ENOSYS;
}

static const struct nvkm_device_oclass
nvkm_fifo_class = {
	.ctor = nvkm_fifo_class_new,
};
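
/*
 * Class enumeration follows the usual nvkm sclass convention (sketch of the
 * contract, not new behaviour): the core calls this with an increasing index;
 * on a hit the oclass/class pointers are filled in and 0 is returned,
 * otherwise the total number of classes is returned so the caller knows when
 * to stop enumerating.
 */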
static int
nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	const struct nvkm_fifo_func_chan *chan = &fifo->func->chan;
	int c = 0;

	/* *_CHANNEL_DMA, *_CHANNEL_GPFIFO_* */
	if (chan->user.oclass) {
		if (c++ == index) {
			oclass->base = chan->user;
			oclass->engn = &fifo->func->chan.user;
			*class = &nvkm_fifo_class;
			return 0;
		}
	}

	return c;
}

static int
nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);

	nvkm_inth_block(&fifo->engine.subdev.inth);

	if (fifo->func->fini)
		fifo->func->fini(fifo);

	return 0;
}

static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);

	fifo->func->init(fifo);

	nvkm_inth_allow(&fifo->engine.subdev.inth);
	return 0;
}
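
/*
 * Backend for NV_DEVICE_V0 info queries (see <nvif/cl0080.h>).
 * HOST_CHANNELS reports the total number of channel IDs, HOST_RUNLISTS a
 * bitmask of valid runlist indices; for HOST_RUNLIST_ENGINES and
 * HOST_RUNLIST_CHANNELS the runlist index is passed in via *data and the
 * result (engine bitmask, or channel count) is returned in place.
 */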
static int
nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	int ret;

	ret = nvkm_subdev_oneinit(&fifo->engine.subdev);
	if (ret)
		return ret;

	switch (mthd) {
	case NV_DEVICE_HOST_CHANNELS: *data = fifo->chid ? fifo->chid->nr : 0; return 0;
	case NV_DEVICE_HOST_RUNLISTS:
		*data = 0;
		nvkm_runl_foreach(runl, fifo)
			*data |= BIT(runl->id);
		return 0;
	case NV_DEVICE_HOST_RUNLIST_ENGINES:
		runl = nvkm_runl_get(fifo, *data, 0);
		if (runl) {
			*data = 0;
			nvkm_runl_foreach_engn(engn, runl) {
#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
				switch (engn->engine->subdev.type) {
				case NVKM_ENGINE_DMAOBJ:
					break;
				CASE(SW    );
				CASE(GR    );
				CASE(MPEG  );
				CASE(ME    );
				CASE(CIPHER);
				CASE(BSP   );
				CASE(VP    );
				CASE(CE    );
				CASE(SEC   );
				CASE(MSVLD );
				CASE(MSPDEC);
				CASE(MSPPP );
				CASE(MSENC );
				CASE(VIC   );
				CASE(SEC2  );
				CASE(NVDEC );
				CASE(NVENC );
				default:
					WARN_ON(1);
					break;
				}
#undef CASE
			}
			return 0;
		}
		return -EINVAL;
	case NV_DEVICE_HOST_RUNLIST_CHANNELS:
		if (!fifo->chid) {
			runl = nvkm_runl_get(fifo, *data, 0);
			if (runl) {
				*data = runl->chid->nr;
				return 0;
			}
		}
		return -EINVAL;
	default:
		break;
	}

	return -ENOSYS;
}
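
/*
 * One-time setup, in order: channel/cgroup-ID allocators (only on GPUs where
 * they aren't per-runlist), one runqueue per PBDMA unit, the runlists and
 * their engines, and finally the top-level interrupt handler.
 */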
static int
nvkm_fifo_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_subdev *subdev = &engine->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	int ret, nr, i;

	/* Initialise CHID/CGID allocator(s) on GPUs where they aren't per-runlist. */
	if (fifo->func->chid_nr) {
		ret = fifo->func->chid_ctor(fifo, fifo->func->chid_nr(fifo));
		if (ret)
			return ret;
	}

	/* Create runqueues for each PBDMA. */
	if (fifo->func->runq_nr) {
		for (nr = fifo->func->runq_nr(fifo), i = 0; i < nr; i++) {
			if (!nvkm_runq_new(fifo, i))
				return -ENOMEM;
		}
	}

	/* Create runlists. */
	ret = fifo->func->runl_ctor(fifo);
	if (ret)
		return ret;

	nvkm_runl_foreach(runl, fifo) {
		RUNL_DEBUG(runl, "");
		nvkm_runl_foreach_engn(engn, runl) {
			ENGN_DEBUG(engn, "");
		}
	}

	/* Register interrupt handler. */
	if (fifo->func->intr) {
		ret = nvkm_inth_add(&device->mc->intr, NVKM_INTR_SUBDEV, NVKM_INTR_PRIO_NORMAL,
				    subdev, fifo->func->intr, &subdev->inth);
		if (ret) {
			nvkm_error(subdev, "intr %d\n", ret);
			return ret;
		}
	}

	if (fifo->func->oneinit)
		return fifo->func->oneinit(fifo);

	return 0;
}

static void
nvkm_fifo_preinit(struct nvkm_engine *engine)
{
	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
}

static void *
nvkm_fifo_dtor(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runl *runl, *runt;
	struct nvkm_runq *runq, *rtmp;
	void *data = fifo;

	list_for_each_entry_safe(runl, runt, &fifo->runls, head)
		nvkm_runl_del(runl);
	list_for_each_entry_safe(runq, rtmp, &fifo->runqs, head)
		nvkm_runq_del(runq);

	nvkm_chid_unref(&fifo->cgid);
	nvkm_chid_unref(&fifo->chid);

	if (fifo->func->dtor)
		data = fifo->func->dtor(fifo);
	nvkm_event_fini(&fifo->kevent);
	nvkm_event_fini(&fifo->uevent);
	mutex_destroy(&fifo->mutex);
	return data;
}

static const struct nvkm_engine_func
nvkm_fifo = {
	.dtor = nvkm_fifo_dtor,
	.preinit = nvkm_fifo_preinit,
	.oneinit = nvkm_fifo_oneinit,
	.info = nvkm_fifo_info,
	.init = nvkm_fifo_init,
	.fini = nvkm_fifo_fini,
	.base.sclass = nvkm_fifo_class_get,
};

int
nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, struct nvkm_fifo *fifo)
{
	int ret, nr;

	fifo->func = func;
	INIT_LIST_HEAD(&fifo->runqs);
	INIT_LIST_HEAD(&fifo->runls);
	spin_lock_init(&fifo->lock);
	mutex_init(&fifo->mutex);

	ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&fifo->chan);

	nr = func->chid_nr(fifo);
	fifo->nr = nr;

	if (func->uevent_init) {
		ret = nvkm_event_init(&nvkm_fifo_uevent_func, &fifo->engine.subdev, 1, 1,
				      &fifo->uevent);
		if (ret)
			return ret;
	}

	return nvkm_event_init(&nvkm_fifo_kevent_func, &fifo->engine.subdev, 1, nr, &fifo->kevent);
}
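
/*
 * Illustrative sketch only (not part of this file): a chip-specific backend
 * typically provides its own nvkm_fifo_func and hands an allocated nvkm_fifo
 * to nvkm_fifo_ctor() from its constructor.  The xx00_* names below are
 * placeholders; real implementations live in the per-chipset files in this
 * directory (nv04.c, gf100.c, gk104.c, ...).
 *
 *	static const struct nvkm_fifo_func
 *	xx00_fifo = {
 *		.chid_nr = xx00_fifo_chid_nr,
 *		.chid_ctor = xx00_fifo_chid_ctor,
 *		.runq_nr = xx00_fifo_runq_nr,
 *		.runl_ctor = xx00_fifo_runl_ctor,
 *		.init = xx00_fifo_init,
 *		.intr = xx00_fifo_intr,
 *	};
 *
 *	int
 *	xx00_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type,
 *		      int inst, struct nvkm_fifo **pfifo)
 *	{
 *		struct nvkm_fifo *fifo;
 *
 *		if (!(fifo = *pfifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
 *			return -ENOMEM;
 *		return nvkm_fifo_ctor(&xx00_fifo, device, type, inst, fifo);
 *	}
 */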