xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c (revision 4e94ddfe2aab72139acb8d5372fac9e6c3f3e383)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
#include "runl.h"
#include "priv.h"

#include <core/ramht.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

#include <nvif/if0020.h>

const struct nvkm_event_func
nvkm_chan_event = {
};

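/* Bind (cctx != NULL) or unbind (cctx == NULL) a channel's engine context.
 *
 * The channel (or, for HW channel groups, the whole runlist) is blocked and
 * preempted around the update, so the context pointer is never switched while
 * the channel is still resident on the engine.
 */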
void
nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx *cctx)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;
	struct nvkm_engine *engine = engn->engine;

	if (!engn->func->bind)
		return;

	CHAN_TRACE(chan, "%sbind cctx %d[%s]", cctx ? "" : "un", engn->id, engine->subdev.name);

	/* Prevent any channel in the channel group from being rescheduled, and
	 * kick them off the host and any engine(s) they're loaded on.
	 */
	if (cgrp->hw)
		nvkm_runl_block(runl);
	else
		nvkm_chan_block(chan);
	nvkm_chan_preempt(chan, true);

	/* Update context pointer. */
	engn->func->bind(engn, cctx, chan);

	/* Resume normal operation. */
	if (cgrp->hw)
		nvkm_runl_allow(runl);
	else
		nvkm_chan_allow(chan);
}

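/* Drop a reference to a channel context obtained with nvkm_chan_cctx_get(),
 * destroying it (and releasing its sub-context) once the last reference goes
 * away.  *pcctx is cleared in either case.
 */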
void
nvkm_chan_cctx_put(struct nvkm_chan *chan, struct nvkm_cctx **pcctx)
{
	struct nvkm_cctx *cctx = *pcctx;

	if (cctx) {
		struct nvkm_engn *engn = cctx->vctx->ectx->engn;

		if (refcount_dec_and_mutex_lock(&cctx->refs, &chan->cgrp->mutex)) {
			CHAN_TRACE(chan, "dtor cctx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_cgrp_vctx_put(chan->cgrp, &cctx->vctx);
			list_del(&cctx->head);
			kfree(cctx);
			mutex_unlock(&chan->cgrp->mutex);
		}

		*pcctx = NULL;
	}
}

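/* Look up, or create, a channel context (engine binding tracker) for the
 * given engine, taking a reference on it.  A sub-context (vctx) is obtained
 * from the channel group first, as the cctx merely points at it.
 *
 * Each successful call should be balanced by nvkm_chan_cctx_put().
 */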
int
nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx **pcctx,
		   struct nvkm_client *client)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_vctx *vctx;
	struct nvkm_cctx *cctx;
	int ret;

	/* Look for an existing channel context for this engine+VEID. */
	mutex_lock(&cgrp->mutex);
	cctx = nvkm_list_find(cctx, &chan->cctxs, head,
			      cctx->vctx->ectx->engn == engn && cctx->vctx->vmm == chan->vmm);
	if (cctx) {
		refcount_inc(&cctx->refs);
		*pcctx = cctx;
		mutex_unlock(&chan->cgrp->mutex);
		return 0;
	}

	/* Nope - create a fresh one.  But, sub-context first. */
	ret = nvkm_cgrp_vctx_get(cgrp, engn, chan, &vctx, client);
	if (ret) {
		CHAN_ERROR(chan, "vctx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
		goto done;
	}

	/* Now, create the channel context - to track engine binding. */
	CHAN_TRACE(chan, "ctor cctx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(cctx = *pcctx = kzalloc(sizeof(*cctx), GFP_KERNEL))) {
		nvkm_cgrp_vctx_put(cgrp, &vctx);
		ret = -ENOMEM;
		goto done;
	}

	cctx->vctx = vctx;
	refcount_set(&cctx->refs, 1);
	refcount_set(&cctx->uses, 0);
	list_add_tail(&cctx->head, &chan->cctxs);
done:
	mutex_unlock(&cgrp->mutex);
	return ret;
}

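/* Preempt the channel, optionally waiting for the preempt to complete.
 * Caller is expected to hold the runlist mutex.
 */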
int
nvkm_chan_preempt_locked(struct nvkm_chan *chan, bool wait)
{
	struct nvkm_runl *runl = chan->cgrp->runl;

	CHAN_TRACE(chan, "preempt");
	chan->func->preempt(chan);
	if (!wait)
		return 0;

	return nvkm_runl_preempt_wait(runl);
}

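/* As nvkm_chan_preempt_locked(), but takes the runlist mutex itself and is a
 * no-op on channels without a preempt method.
 */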
int
nvkm_chan_preempt(struct nvkm_chan *chan, bool wait)
{
	int ret;

	if (!chan->func->preempt)
		return 0;

	mutex_lock(&chan->cgrp->runl->mutex);
	ret = nvkm_chan_preempt_locked(chan, wait);
	mutex_unlock(&chan->cgrp->runl->mutex);
	return ret;
}

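/* Take the channel off its channel group's list (and the group off the
 * runlist, if it was the group's last channel), and mark the runlist as
 * needing an update.  Caller is expected to hold the runlist mutex.
 */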
void
nvkm_chan_remove_locked(struct nvkm_chan *chan)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;

	if (list_empty(&chan->head))
		return;

	CHAN_TRACE(chan, "remove");
	if (!--cgrp->chan_nr) {
		runl->cgrp_nr--;
		list_del(&cgrp->head);
	}
	runl->chan_nr--;
	list_del_init(&chan->head);
	atomic_set(&runl->changed, 1);
}

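/* Remove the channel from its runlist and trigger a runlist update,
 * optionally preempting the channel first.
 */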
void
nvkm_chan_remove(struct nvkm_chan *chan, bool preempt)
{
	struct nvkm_runl *runl = chan->cgrp->runl;

	mutex_lock(&runl->mutex);
	if (preempt && chan->func->preempt)
		nvkm_chan_preempt_locked(chan, true);
	nvkm_chan_remove_locked(chan);
	nvkm_runl_update_locked(runl, true);
	mutex_unlock(&runl->mutex);
}

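/* Add the channel to its channel group (and the group to the runlist, if
 * this is the group's first channel), then trigger a runlist update.
 */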
void
nvkm_chan_insert(struct nvkm_chan *chan)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;

	mutex_lock(&runl->mutex);
	if (WARN_ON(!list_empty(&chan->head))) {
		mutex_unlock(&runl->mutex);
		return;
	}

	CHAN_TRACE(chan, "insert");
	list_add_tail(&chan->head, &cgrp->chans);
	runl->chan_nr++;
	if (!cgrp->chan_nr++) {
		list_add_tail(&cgrp->head, &cgrp->runl->cgrps);
		runl->cgrp_nr++;
	}
	atomic_set(&runl->changed, 1);
	nvkm_runl_update_locked(runl, true);
	mutex_unlock(&runl->mutex);
}

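/* Stop the channel on the first block reference.  Caller must hold chan->lock. */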
static void
nvkm_chan_block_locked(struct nvkm_chan *chan)
{
	CHAN_TRACE(chan, "block %d", atomic_read(&chan->blocked));
	if (atomic_inc_return(&chan->blocked) == 1)
		chan->func->stop(chan);
}

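/* Mark the channel as errored (first time only): block it, optionally issue
 * a preempt, and notify listeners via NVKM_CHAN_EVENT_ERRORED.  The block
 * taken here is never paired with an allow, so the channel stays disabled.
 */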
void
nvkm_chan_error(struct nvkm_chan *chan, bool preempt)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (atomic_inc_return(&chan->errored) == 1) {
		CHAN_ERROR(chan, "errored - disabling channel");
		nvkm_chan_block_locked(chan);
		if (preempt)
			chan->func->preempt(chan);
		nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED);
	}
	spin_unlock_irqrestore(&chan->lock, flags);
}

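/* Block/allow channel execution.  Blocks nest: the channel's start() method
 * is only called again once every block has been balanced by an allow (note
 * that nvkm_chan_new_() creates channels with one block already outstanding).
 */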
void
nvkm_chan_block(struct nvkm_chan *chan)
{
	spin_lock_irq(&chan->lock);
	nvkm_chan_block_locked(chan);
	spin_unlock_irq(&chan->lock);
}

void
nvkm_chan_allow(struct nvkm_chan *chan)
{
	spin_lock_irq(&chan->lock);
	CHAN_TRACE(chan, "allow %d", atomic_read(&chan->blocked));
	if (atomic_dec_and_test(&chan->blocked))
		chan->func->start(chan);
	spin_unlock_irq(&chan->lock);
}

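/* Destroy a channel: clear its RAMFC, release the channel's GPU objects
 * (RAMHT, RAMFC, USERD, instance block, push buffer ctxdma, ...), return its
 * channel ID, and drop the channel group and VMM references.
 */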
void
nvkm_chan_del(struct nvkm_chan **pchan)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	if (chan->func->ramfc->clear)
		chan->func->ramfc->clear(chan);

	nvkm_ramht_del(&chan->ramht);
	nvkm_gpuobj_del(&chan->pgd);
	nvkm_gpuobj_del(&chan->eng);
	nvkm_gpuobj_del(&chan->cache);
	nvkm_gpuobj_del(&chan->ramfc);

	if (chan->cgrp) {
		if (!chan->func->id_put)
			nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
		else
			chan->func->id_put(chan);

		nvkm_cgrp_unref(&chan->cgrp);
	}

	nvkm_memory_unref(&chan->userd.mem);

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst->memory);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	kfree(chan);
}

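/* Release a channel returned by nvkm_chan_get_inst()/nvkm_chan_get_chid(),
 * dropping the channel-group lock that the lookup acquired.
 */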
void
nvkm_chan_put(struct nvkm_chan **pchan, unsigned long irqflags)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	*pchan = NULL;
	spin_unlock_irqrestore(&chan->cgrp->lock, irqflags);
}

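/* Find the channel whose instance address matches 'inst' on any runlist the
 * given engine can run on (or on all runlists, when called with the FIFO
 * engine itself).  On success the channel is returned locked; release it
 * with nvkm_chan_put().
 */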
struct nvkm_chan *
nvkm_chan_get_inst(struct nvkm_engine *engine, u64 inst, unsigned long *pirqflags)
{
	struct nvkm_fifo *fifo = engine->subdev.device->fifo;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	struct nvkm_chan *chan;

	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn(engn, runl) {
			if (engine == &fifo->engine || engn->engine == engine) {
				chan = nvkm_runl_chan_get_inst(runl, inst, pirqflags);
				if (chan || engn->engine == engine)
					return chan;
			}
		}
	}

	return NULL;
}

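/* As nvkm_chan_get_inst(), but looks the channel up by channel ID rather
 * than by instance address.
 */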
struct nvkm_chan *
nvkm_chan_get_chid(struct nvkm_engine *engine, int id, unsigned long *pirqflags)
{
	struct nvkm_fifo *fifo = engine->subdev.device->fifo;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;

	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn(engn, runl) {
			if (fifo->chid || engn->engine == engine)
				return nvkm_runl_chan_get_chid(runl, id, pirqflags);
		}
	}

	return NULL;
}

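/* Common channel constructor: validates the request against the class's
 * capabilities, joins (or creates) a channel group, then sets up the
 * instance block, VMM reference, push buffer ctxdma, channel ID, USERD and
 * RAMFC that make up the channel.
 */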
int
nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int runq,
	       struct nvkm_cgrp *cgrp, const char *name, bool priv, u32 devm, struct nvkm_vmm *vmm,
	       struct nvkm_dmaobj *dmaobj, u64 offset, u64 length,
	       struct nvkm_memory *userd, u64 ouserd, struct nvkm_chan **pchan)
{
	struct nvkm_fifo *fifo = runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_chan *chan;
	int ret;

	/* Validate arguments against class requirements. */
	if ((runq && runq >= runl->func->runqs) ||
	    (!func->inst->vmm != !vmm) ||
	    ((func->userd->bar < 0) == !userd) ||
	    (!func->ramfc->ctxdma != !dmaobj) ||
	    ((func->ramfc->devm < devm) && devm != BIT(0)) ||
	    (!func->ramfc->priv && priv)) {
		RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
				 "push:%d:%p devm:%08x:%08x priv:%d:%d",
			   runl->func->runqs, runq, func->inst->vmm, vmm,
			   func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
			   func->ramfc->devm, devm, func->ramfc->priv, priv);
		return -EINVAL;
	}

	if (!(chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;

	chan->func = func;
	strscpy(chan->name, name, sizeof(chan->name));
	chan->runq = runq;
	chan->id = -1;
	spin_lock_init(&chan->lock);
	atomic_set(&chan->blocked, 1);
	atomic_set(&chan->errored, 0);
	INIT_LIST_HEAD(&chan->cctxs);
	INIT_LIST_HEAD(&chan->head);

	/* Join channel group.
	 *
	 * GK110 and newer support channel groups (aka TSGs), where individual channels
	 * share a timeslice and engine context(s).
	 *
	 * As such, engine contexts are tracked in nvkm_cgrp, and a channel group is
	 * needed even when channels aren't in an API channel group, and on HW that
	 * doesn't support TSGs.
	 */
	if (!cgrp) {
		ret = nvkm_cgrp_new(runl, chan->name, vmm, fifo->func->cgrp.force, &chan->cgrp);
		if (ret) {
			RUNL_DEBUG(runl, "cgrp %d", ret);
			return ret;
		}

		cgrp = chan->cgrp;
	} else {
		if (cgrp->runl != runl || cgrp->vmm != vmm) {
			RUNL_DEBUG(runl, "cgrp %d %d", cgrp->runl != runl, cgrp->vmm != vmm);
			return -EINVAL;
		}

		chan->cgrp = nvkm_cgrp_ref(cgrp);
	}

	/* Allocate instance block. */
	ret = nvkm_gpuobj_new(device, func->inst->size, 0x1000, func->inst->zero, NULL,
			      &chan->inst);
	if (ret) {
		RUNL_DEBUG(runl, "inst %d", ret);
		return ret;
	}

	/* Initialise virtual address-space. */
	if (func->inst->vmm) {
		if (WARN_ON(vmm->mmu != device->mmu))
			return -EINVAL;

		ret = nvkm_vmm_join(vmm, chan->inst->memory);
		if (ret) {
			RUNL_DEBUG(runl, "vmm %d", ret);
			return ret;
		}

		chan->vmm = nvkm_vmm_ref(vmm);
	}

	/* Allocate HW ctxdma for push buffer. */
	if (func->ramfc->ctxdma) {
		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, &chan->push);
		if (ret) {
			RUNL_DEBUG(runl, "bind %d", ret);
			return ret;
		}
	}

	/* Allocate channel ID. */
	if (!chan->func->id_get) {
		chan->id = nvkm_chid_get(runl->chid, chan);
		if (chan->id >= 0) {
			if (func->userd->bar < 0) {
				if (ouserd + chan->func->userd->size >=
					nvkm_memory_size(userd)) {
					RUNL_DEBUG(runl, "ouserd %llx", ouserd);
					return -EINVAL;
				}

				ret = nvkm_memory_kmap(userd, &chan->userd.mem);
				if (ret) {
					RUNL_DEBUG(runl, "userd %d", ret);
					return ret;
				}

				chan->userd.base = ouserd;
			} else {
				chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
				chan->userd.base = chan->id * chan->func->userd->size;
			}
		}
	} else {
		chan->id = chan->func->id_get(chan, userd, ouserd);
	}

	if (chan->id < 0) {
		RUNL_ERROR(runl, "!chids");
		return -ENOSPC;
	}

	if (cgrp->id < 0)
		cgrp->id = chan->id;

	/* Initialise USERD. */
	if (chan->func->userd->clear)
		chan->func->userd->clear(chan);

	/* Initialise RAMFC. */
	ret = chan->func->ramfc->write(chan, offset, length, devm, priv);
	if (ret) {
		RUNL_DEBUG(runl, "ramfc %d", ret);
		return ret;
	}

	return 0;
}