/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
#include "runl.h"
#include "priv.h"

#include <core/ramht.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

#include <nvif/if0020.h>

const struct nvkm_event_func
nvkm_chan_event = {
};

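/* Bind (or, when cctx is NULL, unbind) an engine context to a channel.
 *
 * The channel (or its whole channel group, when backed by a HW TSG) is
 * blocked and preempted around the call into the engine's bind hook, so
 * the context pointer is never updated while the channel can run.
 */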
void
nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx *cctx)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;
	struct nvkm_engine *engine = engn->engine;

	if (!engn->func->bind)
		return;

	CHAN_TRACE(chan, "%sbind cctx %d[%s]", cctx ? "" : "un", engn->id, engine->subdev.name);

	/* Prevent any channel in channel group from being rescheduled, kick them
	 * off host and any engine(s) they're loaded on.
	 */
	if (cgrp->hw)
		nvkm_runl_block(runl);
	else
		nvkm_chan_block(chan);
	nvkm_chan_preempt(chan, true);

	/* Update context pointer. */
	engn->func->bind(engn, cctx, chan);

	/* Resume normal operation. */
	if (cgrp->hw)
		nvkm_runl_allow(runl);
	else
		nvkm_chan_allow(chan);
}

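/* Drop a reference on a channel context, destroying it (and releasing the
 * underlying sub-context via nvkm_cgrp_vctx_put()) once the last reference
 * is gone.
 */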
void
nvkm_chan_cctx_put(struct nvkm_chan *chan, struct nvkm_cctx **pcctx)
{
	struct nvkm_cctx *cctx = *pcctx;

	if (cctx) {
		struct nvkm_engn *engn = cctx->vctx->ectx->engn;

		if (refcount_dec_and_mutex_lock(&cctx->refs, &chan->cgrp->mutex)) {
			CHAN_TRACE(chan, "dtor cctx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_cgrp_vctx_put(chan->cgrp, &cctx->vctx);
			list_del(&cctx->head);
			kfree(cctx);
			mutex_unlock(&chan->cgrp->mutex);
		}

		*pcctx = NULL;
	}
}

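/* Look up (or create) the channel context that tracks this channel's binding
 * to an engine, taking a reference on it.  Creation first acquires the
 * engine/sub-context state via nvkm_cgrp_vctx_get().
 */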
int
nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx **pcctx,
		   struct nvkm_client *client)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_vctx *vctx;
	struct nvkm_cctx *cctx;
	int ret;

	/* Look for an existing channel context for this engine+VEID. */
	mutex_lock(&cgrp->mutex);
	cctx = nvkm_list_find(cctx, &chan->cctxs, head,
			      cctx->vctx->ectx->engn == engn && cctx->vctx->vmm == chan->vmm);
	if (cctx) {
		refcount_inc(&cctx->refs);
		*pcctx = cctx;
		mutex_unlock(&chan->cgrp->mutex);
		return 0;
	}

	/* Nope - create a fresh one.  But, sub-context first. */
	ret = nvkm_cgrp_vctx_get(cgrp, engn, chan, &vctx, client);
	if (ret) {
		CHAN_ERROR(chan, "vctx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
		goto done;
	}

	/* Now, create the channel context - to track engine binding. */
	CHAN_TRACE(chan, "ctor cctx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(cctx = *pcctx = kzalloc(sizeof(*cctx), GFP_KERNEL))) {
		nvkm_cgrp_vctx_put(cgrp, &vctx);
		ret = -ENOMEM;
		goto done;
	}

	cctx->vctx = vctx;
	refcount_set(&cctx->refs, 1);
	refcount_set(&cctx->uses, 0);
	list_add_tail(&cctx->head, &chan->cctxs);
done:
	mutex_unlock(&cgrp->mutex);
	return ret;
}

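/* Issue a preempt for the channel and, if requested, wait for the runlist
 * to report preemption complete.  Callers in this file hold runl->mutex.
 */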
int
nvkm_chan_preempt_locked(struct nvkm_chan *chan, bool wait)
{
	struct nvkm_runl *runl = chan->cgrp->runl;

	CHAN_TRACE(chan, "preempt");
	chan->func->preempt(chan);
	if (!wait)
		return 0;

	return nvkm_runl_preempt_wait(runl);
}

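/* As nvkm_chan_preempt_locked(), but takes the runlist mutex itself.
 * A no-op for channels without a preempt hook.
 */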
int
nvkm_chan_preempt(struct nvkm_chan *chan, bool wait)
{
	int ret;

	if (!chan->func->preempt)
		return 0;

	mutex_lock(&chan->cgrp->runl->mutex);
	ret = nvkm_chan_preempt_locked(chan, wait);
	mutex_unlock(&chan->cgrp->runl->mutex);
	return ret;
}

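/* Remove a channel from its runlist's bookkeeping, dropping the channel
 * group from the runlist too once it holds no more channels.  Marks the
 * runlist as changed, but does not rebuild it.
 */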
void
nvkm_chan_remove_locked(struct nvkm_chan *chan)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;

	if (list_empty(&chan->head))
		return;

	CHAN_TRACE(chan, "remove");
	if (!--cgrp->chan_nr) {
		runl->cgrp_nr--;
		list_del(&cgrp->head);
	}
	runl->chan_nr--;
	list_del_init(&chan->head);
	atomic_set(&runl->changed, 1);
}

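/* Optionally preempt the channel, remove it from its runlist, and trigger
 * a runlist update.
 */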
void
nvkm_chan_remove(struct nvkm_chan *chan, bool preempt)
{
	struct nvkm_runl *runl = chan->cgrp->runl;

	mutex_lock(&runl->mutex);
	if (preempt && chan->func->preempt)
		nvkm_chan_preempt_locked(chan, true);
	nvkm_chan_remove_locked(chan);
	nvkm_runl_update_locked(runl, true);
	mutex_unlock(&runl->mutex);
}

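/* Add a channel (and, if this is its first channel, its channel group) to
 * the runlist and trigger a runlist update.
 */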
void
nvkm_chan_insert(struct nvkm_chan *chan)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;

	mutex_lock(&runl->mutex);
	if (WARN_ON(!list_empty(&chan->head))) {
		mutex_unlock(&runl->mutex);
		return;
	}

	CHAN_TRACE(chan, "insert");
	list_add_tail(&chan->head, &cgrp->chans);
	runl->chan_nr++;
	if (!cgrp->chan_nr++) {
		list_add_tail(&cgrp->head, &cgrp->runl->cgrps);
		runl->cgrp_nr++;
	}
	atomic_set(&runl->changed, 1);
	nvkm_runl_update_locked(runl, true);
	mutex_unlock(&runl->mutex);
}

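/* Stop a channel from being scheduled.  Blocking nests; the stop hook is
 * only called on the first block.  Caller holds chan->lock.
 */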
static void
nvkm_chan_block_locked(struct nvkm_chan *chan)
{
	CHAN_TRACE(chan, "block %d", atomic_read(&chan->blocked));
	if (atomic_inc_return(&chan->blocked) == 1)
		chan->func->stop(chan);
}

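/* Mark a channel as errored: block it, optionally preempt it off HW, and
 * notify listeners via the channel-ID event.  Only the first error on a
 * channel takes effect.
 */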
void
nvkm_chan_error(struct nvkm_chan *chan, bool preempt)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (atomic_inc_return(&chan->errored) == 1) {
		CHAN_ERROR(chan, "errored - disabling channel");
		nvkm_chan_block_locked(chan);
		if (preempt)
			chan->func->preempt(chan);
		nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED);
	}
	spin_unlock_irqrestore(&chan->lock, flags);
}

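/* Take chan->lock and block the channel from being scheduled. */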
void
nvkm_chan_block(struct nvkm_chan *chan)
{
	spin_lock_irq(&chan->lock);
	nvkm_chan_block_locked(chan);
	spin_unlock_irq(&chan->lock);
}

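/* Undo a prior nvkm_chan_block(); the channel is restarted once the last
 * block is released.
 */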
void
nvkm_chan_allow(struct nvkm_chan *chan)
{
	spin_lock_irq(&chan->lock);
	CHAN_TRACE(chan, "allow %d", atomic_read(&chan->blocked));
	if (atomic_dec_and_test(&chan->blocked))
		chan->func->start(chan);
	spin_unlock_irq(&chan->lock);
}

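/* Destroy a channel, releasing its RAMFC/instance objects, channel ID,
 * channel group reference, USERD memory and VMM reference.
 */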
void
nvkm_chan_del(struct nvkm_chan **pchan)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	if (chan->func->ramfc->clear)
		chan->func->ramfc->clear(chan);

	nvkm_ramht_del(&chan->ramht);
	nvkm_gpuobj_del(&chan->pgd);
	nvkm_gpuobj_del(&chan->eng);
	nvkm_gpuobj_del(&chan->cache);
	nvkm_gpuobj_del(&chan->ramfc);

	if (chan->cgrp) {
		if (!chan->func->id_put)
			nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
		else
			chan->func->id_put(chan);

		nvkm_cgrp_unref(&chan->cgrp);
	}

	nvkm_memory_unref(&chan->userd.mem);

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst->memory);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	kfree(chan);
}

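/* Release a channel looked up via nvkm_chan_get_inst()/nvkm_chan_get_chid(),
 * clearing the pointer and dropping chan->cgrp->lock with the IRQ flags
 * saved by the lookup.
 */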
void
nvkm_chan_put(struct nvkm_chan **pchan, unsigned long irqflags)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	*pchan = NULL;
	spin_unlock_irqrestore(&chan->cgrp->lock, irqflags);
}

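/* Look up the channel whose instance address matches 'inst', searching the
 * runlists that service 'engine' (or every runlist when given the FIFO
 * engine itself).  Balance with nvkm_chan_put() on success.
 */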
struct nvkm_chan *
nvkm_chan_get_inst(struct nvkm_engine *engine, u64 inst, unsigned long *pirqflags)
{
	struct nvkm_fifo *fifo = engine->subdev.device->fifo;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	struct nvkm_chan *chan;

	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn(engn, runl) {
			if (engine == &fifo->engine || engn->engine == engine) {
				chan = nvkm_runl_chan_get_inst(runl, inst, pirqflags);
				if (chan || engn->engine == engine)
					return chan;
			}
		}
	}

	return NULL;
}

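/* Look up a channel by channel ID on a runlist servicing 'engine'.
 * Balance with nvkm_chan_put() on success.
 */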
struct nvkm_chan *
nvkm_chan_get_chid(struct nvkm_engine *engine, int id, unsigned long *pirqflags)
{
	struct nvkm_fifo *fifo = engine->subdev.device->fifo;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;

	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn(engn, runl) {
			if (fifo->chid || engn->engine == engine)
				return nvkm_runl_chan_get_chid(runl, id, pirqflags);
		}
	}

	return NULL;
}

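/* Common channel constructor: validates the request against the class's
 * capabilities, creates (or joins) a channel group, then allocates the
 * instance block, VMM mapping, push buffer ctxdma, channel ID, USERD and
 * RAMFC as required by the class.
 */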
int
nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int runq,
	       struct nvkm_cgrp *cgrp, const char *name, bool priv, u32 devm, struct nvkm_vmm *vmm,
	       struct nvkm_dmaobj *dmaobj, u64 offset, u64 length,
	       struct nvkm_memory *userd, u64 ouserd, struct nvkm_chan **pchan)
{
	struct nvkm_fifo *fifo = runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_chan *chan;
	int ret;

	/* Validate arguments against class requirements. */
	if ((runq && runq >= runl->func->runqs) ||
	    (!func->inst->vmm != !vmm) ||
	    ((func->userd->bar < 0) == !userd) ||
	    (!func->ramfc->ctxdma != !dmaobj) ||
	    ((func->ramfc->devm < devm) && devm != BIT(0)) ||
	    (!func->ramfc->priv && priv)) {
		RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
				 "push:%d:%p devm:%08x:%08x priv:%d:%d",
			   runl->func->runqs, runq, func->inst->vmm, vmm,
			   func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
			   func->ramfc->devm, devm, func->ramfc->priv, priv);
		return -EINVAL;
	}

	if (!(chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;

	chan->func = func;
	strscpy(chan->name, name, sizeof(chan->name));
	chan->runq = runq;
	chan->id = -1;
	spin_lock_init(&chan->lock);
	atomic_set(&chan->blocked, 1);
	atomic_set(&chan->errored, 0);
	INIT_LIST_HEAD(&chan->cctxs);
	INIT_LIST_HEAD(&chan->head);

	/* Join channel group.
	 *
	 * GK110 and newer support channel groups (aka TSGs), where individual channels
	 * share a timeslice, and, engine context(s).
	 *
	 * As such, engine contexts are tracked in nvkm_cgrp and we need them even when
	 * channels aren't in an API channel group, and on HW that doesn't support TSGs.
	 */
	if (!cgrp) {
		ret = nvkm_cgrp_new(runl, chan->name, vmm, fifo->func->cgrp.force, &chan->cgrp);
		if (ret) {
			RUNL_DEBUG(runl, "cgrp %d", ret);
			return ret;
		}

		cgrp = chan->cgrp;
	} else {
		if (cgrp->runl != runl || cgrp->vmm != vmm) {
			RUNL_DEBUG(runl, "cgrp %d %d", cgrp->runl != runl, cgrp->vmm != vmm);
			return -EINVAL;
		}

		chan->cgrp = nvkm_cgrp_ref(cgrp);
	}

	/* Allocate instance block. */
	ret = nvkm_gpuobj_new(device, func->inst->size, 0x1000, func->inst->zero, NULL,
			      &chan->inst);
	if (ret) {
		RUNL_DEBUG(runl, "inst %d", ret);
		return ret;
	}

	/* Initialise virtual address-space. */
	if (func->inst->vmm) {
		if (WARN_ON(vmm->mmu != device->mmu))
			return -EINVAL;

		ret = nvkm_vmm_join(vmm, chan->inst->memory);
		if (ret) {
			RUNL_DEBUG(runl, "vmm %d", ret);
			return ret;
		}

		chan->vmm = nvkm_vmm_ref(vmm);
	}

	/* Allocate HW ctxdma for push buffer. */
	if (func->ramfc->ctxdma) {
		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, &chan->push);
		if (ret) {
			RUNL_DEBUG(runl, "bind %d", ret);
			return ret;
		}
	}

	/* Allocate channel ID. */
	if (!chan->func->id_get) {
		chan->id = nvkm_chid_get(runl->chid, chan);
		if (chan->id >= 0) {
			if (func->userd->bar < 0) {
				if (ouserd + chan->func->userd->size >=
					nvkm_memory_size(userd)) {
					RUNL_DEBUG(runl, "ouserd %llx", ouserd);
					return -EINVAL;
				}

				ret = nvkm_memory_kmap(userd, &chan->userd.mem);
				if (ret) {
					RUNL_DEBUG(runl, "userd %d", ret);
					return ret;
				}

				chan->userd.base = ouserd;
			} else {
				chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
				chan->userd.base = chan->id * chan->func->userd->size;
			}
		}
	} else {
		chan->id = chan->func->id_get(chan, userd, ouserd);
	}

	if (chan->id < 0) {
		RUNL_ERROR(runl, "!chids");
		return -ENOSPC;
	}

	if (cgrp->id < 0)
		cgrp->id = chan->id;

	/* Initialise USERD. */
	if (chan->func->userd->clear)
		chan->func->userd->clear(chan);

	/* Initialise RAMFC. */
	ret = chan->func->ramfc->write(chan, offset, length, devm, priv);
	if (ret) {
		RUNL_DEBUG(runl, "ramfc %d", ret);
		return ret;
	}

	return 0;
}