xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c (revision 2fc71a0566f63ac3cd43d7cf2d5efbbab6293c5f)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
#include "runl.h"
#include "priv.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

#include <nvif/if0020.h>

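/* Channel event controller; no init/fini/send overrides are needed, so
 * the function table is intentionally left empty.
 */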
const struct nvkm_event_func
nvkm_chan_event = {
};

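/* Proxy wrapped around every object allocated on a channel.  The oproxy
 * hooks below manage the lifetime of the engine's channel context, and
 * "hash" stores the handle returned by chan->func->object_ctor() so the
 * object can be unregistered again in the destructor.
 */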
struct nvkm_fifo_chan_object {
	struct nvkm_oproxy oproxy;
	struct nvkm_fifo_chan *chan;
	int hash;
};

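/* Look up the channel's context-tracking slot for an engine, using the
 * HW-specific engine_id() mapping.  Returns NULL if the engine isn't
 * reachable from this FIFO.
 */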
static struct nvkm_fifo_engn *
nvkm_fifo_chan_engn(struct nvkm_fifo_chan *chan, struct nvkm_engine *engine)
{
	int engi = chan->fifo->func->engine_id(chan->fifo, engine);

	if (engi >= 0)
		return &chan->engn[engi];
	return NULL;
}

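/* The last object using an engine on this channel is being stopped:
 * detach the engine's channel context.  A failure from nvkm_object_fini()
 * is only treated as fatal on suspend, where the context state must be
 * preserved.
 */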
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine  = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	const char *name = engine->subdev.name;
	int ret = 0;

	if (--engn->usecount)
		return 0;

	if (chan->func->engine_fini) {
		ret = chan->func->engine_fini(chan, engine, suspend);
		if (ret) {
			nvif_error(&chan->object,
				   "detach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	if (engn->object) {
		ret = nvkm_object_fini(engn->object, suspend);
		if (ret && suspend)
			return ret;
	}

	nvif_trace(&chan->object, "detached %s\n", name);
	return ret;
}

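/* The first object using an engine on this channel is being started:
 * initialise the engine's context object, then ask the HW-specific hook
 * to attach it to the channel.
 */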
static int
nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine  = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	const char *name = engine->subdev.name;
	int ret;

	if (engn->usecount++)
		return 0;

	if (engn->object) {
		ret = nvkm_object_init(engn->object);
		if (ret)
			return ret;
	}

	if (chan->func->engine_init) {
		ret = chan->func->engine_init(chan, engine);
		if (ret) {
			nvif_error(&chan->object,
				   "attach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	nvif_trace(&chan->object, "attached %s\n", name);
	return 0;
}

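/* Proxy destructor: unregister the object from the channel, and drop the
 * engine context reference, destroying the context once no objects on
 * this channel use the engine any longer.
 */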
static void
nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine  = object->oproxy.base.engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);

	if (chan->func->object_dtor)
		chan->func->object_dtor(chan, object->hash);

	if (!--engn->refcount) {
		if (chan->func->engine_dtor)
			chan->func->engine_dtor(chan, engine);
		nvkm_object_del(&engn->object);
		if (chan->vmm)
			atomic_dec(&chan->vmm->engref[engine->subdev.type]);
	}
}

static const struct nvkm_oproxy_func
nvkm_fifo_chan_child_func = {
	.dtor[0] = nvkm_fifo_chan_child_del,
	.init[0] = nvkm_fifo_chan_child_init,
	.fini[0] = nvkm_fifo_chan_child_fini,
};

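/* Constructor for all objects allocated on a channel.  The first object
 * for a given engine also creates the engine's channel context, preferring
 * the engine's FIFO-specific cclass hook over the generic one, and binds
 * it with chan->func->engine_ctor().  The real object is then constructed
 * with the context object as its parent (when one exists), and registered
 * with the channel via object_ctor(), which returns the hash handle used
 * to unregister it on destruction.
 */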
int
nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	struct nvkm_engine *engine = oclass->engine;
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	struct nvkm_fifo_chan_object *object;
	int ret = 0;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
	object->chan = chan;
	*pobject = &object->oproxy.base;

	if (!engn->refcount++) {
		struct nvkm_oclass cclass = {
			.client = oclass->client,
			.engine = oclass->engine,
		};

		if (chan->vmm)
			atomic_inc(&chan->vmm->engref[engine->subdev.type]);

		if (engine->func->fifo.cclass) {
			ret = engine->func->fifo.cclass(chan, &cclass,
							&engn->object);
		} else
		if (engine->func->cclass) {
			ret = nvkm_object_new_(engine->func->cclass, &cclass,
					       NULL, 0, &engn->object);
		}
		if (ret)
			return ret;

		if (chan->func->engine_ctor) {
			ret = chan->func->engine_ctor(chan, oclass->engine,
						      engn->object);
			if (ret)
				return ret;
		}
	}

	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
					.base = oclass->base,
					.engn = oclass->engn,
					.handle = oclass->handle,
					.object = oclass->object,
					.client = oclass->client,
					.parent = engn->object ?
						  engn->object :
						  oclass->parent,
					.engine = engine,
				}, data, size, &object->oproxy.object);
	if (ret)
		return ret;

	if (chan->func->object_ctor) {
		object->hash =
			chan->func->object_ctor(chan, object->oproxy.object);
		if (object->hash < 0)
			return object->hash;
	}

	return 0;
}

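/* Translate NVIF event subscriptions onto the FIFO's event sources:
 * non-stall interrupts are FIFO-wide (index 0), while channel-killed
 * notifications are filtered by this channel's ID.
 */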
static int
nvkm_fifo_chan_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	union nvif_chan_event_args *args = argv;

	switch (args->v0.type) {
	case NVIF_CHAN_EVENT_V0_NON_STALL_INTR:
		return nvkm_uevent_add(uevent, &chan->fifo->uevent, 0,
				       NVKM_FIFO_EVENT_NON_STALL_INTR, NULL);
	case NVIF_CHAN_EVENT_V0_KILLED:
		return nvkm_uevent_add(uevent, &chan->fifo->kevent, chan->chid,
				       NVKM_FIFO_EVENT_KILLED, NULL);
	default:
		break;
	}

	return -ENOSYS;
}

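/* Describe the channel's mappable user register window; chan->addr and
 * chan->size are filled in at the end of nvkm_fifo_chan_ctor().
 */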
static int
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
		   enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);

	*type = NVKM_OBJECT_MAP_IO;
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);

	chan->func->fini(chan);
	return 0;
}

static int
nvkm_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);

	chan->func->init(chan);
	return 0;
}

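/* Destroy a channel: return its channel ID to the runlist's allocator,
 * drop the channel group reference taken at creation, then tear down and
 * free the object itself.
 */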
void
nvkm_chan_del(struct nvkm_chan **pchan)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	if (chan->cgrp) {
		nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
		nvkm_cgrp_unref(&chan->cgrp);
	}

	chan = nvkm_object_dtor(&chan->object);
	kfree(chan);
}

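/* nvkm_object destructor: unlink the channel from the FIFO's list,
 * separate it from its VMM, and release the push buffer ctxdma and
 * instance memory.  chan->func is freed here to match the temporary
 * kludge in nvkm_fifo_chan_ctor().
 */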
static void *
nvkm_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	void *data = chan->func->dtor(chan);
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	if (!list_empty(&chan->head))
		list_del(&chan->head);
	spin_unlock_irqrestore(&fifo->lock, flags);

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst->memory);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	kfree(chan->func);
	return data;
}

static const struct nvkm_object_func
nvkm_fifo_chan_func = {
	.dtor = nvkm_fifo_chan_dtor,
	.init = nvkm_fifo_chan_init,
	.fini = nvkm_fifo_chan_fini,
	.map = nvkm_fifo_chan_map,
	.uevent = nvkm_fifo_chan_uevent,
};

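/* Common channel constructor.  "engm" is a mask selecting the runlist
 * engine the channel will run on, "hvmm" and "push" are client handles
 * for the address space and push buffer ctxdma, and "bar"/"base"/"user"
 * together locate this channel's user register window within one of the
 * device's BARs.
 */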
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *fn,
		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
		    u64 hvmm, u64 push, u32 engm, int bar, u32 base,
		    u32 user, const struct nvkm_oclass *oclass,
		    struct nvkm_fifo_chan *chan)
{
	struct nvkm_chan_func *func;
	struct nvkm_client *client = oclass->client;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_dmaobj *dmaobj;
	struct nvkm_cgrp *cgrp = NULL;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn = NULL;
	struct nvkm_vmm *vmm = NULL;
	unsigned long flags;
	int ret;

	nvkm_runl_foreach(runl, fifo) {
		engn = nvkm_runl_find_engn(engn, runl, engm & BIT(engn->id));
		if (engn)
			break;
	}

	if (!engn)
		return -EINVAL;

	/* FIXME: temp kludge to ease transition, remove later */
	if (!(func = kmalloc(sizeof(*func), GFP_KERNEL)))
		return -ENOMEM;

	*func = *fifo->func->chan.func;
	func->dtor = fn->dtor;
	func->init = fn->init;
	func->fini = fn->fini;
	func->engine_ctor = fn->engine_ctor;
	func->engine_dtor = fn->engine_dtor;
	func->engine_init = fn->engine_init;
	func->engine_fini = fn->engine_fini;
	func->object_ctor = fn->object_ctor;
	func->object_dtor = fn->object_dtor;
	func->submit_token = fn->submit_token;

	chan->func = func;
	chan->id = -1;

	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
	chan->fifo = fifo;
	INIT_LIST_HEAD(&chan->head);

	/* Join channel group.
	 *
	 * GK110 and newer support channel groups (aka TSGs), where individual channels
	 * share a timeslice and engine context(s).
	 *
	 * As such, engine contexts are tracked in nvkm_cgrp and we need them even when
	 * channels aren't in an API channel group, and on HW that doesn't support TSGs.
	 */
	if (!cgrp) {
		ret = nvkm_cgrp_new(runl, chan->name, vmm, fifo->func->cgrp.force, &chan->cgrp);
		if (ret) {
			RUNL_DEBUG(runl, "cgrp %d", ret);
			return ret;
		}

		cgrp = chan->cgrp;
	} else {
		if (cgrp->runl != runl || cgrp->vmm != vmm) {
			RUNL_DEBUG(runl, "cgrp %d %d", cgrp->runl != runl, cgrp->vmm != vmm);
			return -EINVAL;
		}

		chan->cgrp = nvkm_cgrp_ref(cgrp);
	}

	/* instance memory */
	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
	if (ret)
		return ret;

	/* allocate push buffer ctxdma instance */
	if (push) {
		dmaobj = nvkm_dmaobj_search(client, push);
		if (IS_ERR(dmaobj))
			return PTR_ERR(dmaobj);

		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
				       &chan->push);
		if (ret)
			return ret;
	}

	/* channel address space */
	if (hvmm) {
		struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
		if (IS_ERR(vmm))
			return PTR_ERR(vmm);

		if (vmm->mmu != device->mmu)
			return -EINVAL;

		ret = nvkm_vmm_join(vmm, chan->inst->memory);
		if (ret)
			return ret;

		chan->vmm = nvkm_vmm_ref(vmm);
	}

	/* Allocate channel ID. */
	if (runl->cgid) {
		chan->id = chan->cgrp->id;
		runl->chid->data[chan->id] = chan;
		set_bit(chan->id, runl->chid->used);
		goto temp_hack_until_no_chid_eq_cgid_req;
	}

	chan->id = nvkm_chid_get(runl->chid, chan);
	if (chan->id < 0) {
		RUNL_ERROR(runl, "!chids");
		return -ENOSPC;
	}

temp_hack_until_no_chid_eq_cgid_req:
	spin_lock_irqsave(&fifo->lock, flags);
	list_add(&chan->head, &fifo->chan);
	spin_unlock_irqrestore(&fifo->lock, flags);

	/* determine address of this channel's user registers */
	chan->addr = device->func->resource_addr(device, bar) +
		     base + user * chan->chid;
	chan->size = user;
	return 0;
}