/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define nvkm_uchan(p) container_of((p), struct nvkm_uchan, object)
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"

#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

#include <nvif/if0020.h>

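/*
 * Userspace-visible channel object: wraps a core nvkm_chan so that a
 * client can start/stop it, map its USERD, allocate engine objects on
 * it, and subscribe to its events.
 */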
struct nvkm_uchan {
        struct nvkm_object object;
        struct nvkm_chan *chan;
};

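/*
 * Subscribe the client to a channel event source: either the runlist's
 * non-stall interrupt, or notification that the channel has been killed.
 */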
static int
nvkm_uchan_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
        struct nvkm_chan *chan = nvkm_uchan(object)->chan;
        struct nvkm_runl *runl = chan->cgrp->runl;
        union nvif_chan_event_args *args = argv;

        if (!uevent)
                return 0;
        if (argc != sizeof(args->v0) || args->v0.version != 0)
                return -ENOSYS;

        switch (args->v0.type) {
        case NVIF_CHAN_EVENT_V0_NON_STALL_INTR:
                return nvkm_uevent_add(uevent, &runl->fifo->nonstall.event, runl->id,
                                       NVKM_FIFO_NONSTALL_EVENT, NULL);
        case NVIF_CHAN_EVENT_V0_KILLED:
                return nvkm_uevent_add(uevent, &runl->chid->event, chan->id,
                                       NVKM_CHAN_EVENT_ERRORED, NULL);
        default:
                break;
        }

        return -ENOSYS;
}

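/*
 * Proxy for an engine object allocated on a channel.  Holds a reference
 * on the channel context (cctx) for the object's engine, plus the hash
 * returned when the object was added to the channel's RAMHT, if any.
 */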
struct nvkm_uobj {
        struct nvkm_oproxy oproxy;
        struct nvkm_chan *chan;
        struct nvkm_cctx *cctx;
        int hash;
};

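/*
 * Runs after the proxied object's own fini (oproxy fini[1]).  When the
 * last user of the channel context goes away, unbind it from the channel;
 * when the last user of the engine context goes away, fini that too.
 */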
static int
nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, bool suspend)
{
        struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
        struct nvkm_chan *chan = uobj->chan;
        struct nvkm_cctx *cctx = uobj->cctx;
        struct nvkm_ectx *ectx = cctx->vctx->ectx;

        if (!ectx->object)
                return 0;

        /* Unbind engine context from channel, if no longer required. */
        if (refcount_dec_and_mutex_lock(&cctx->uses, &chan->cgrp->mutex)) {
                nvkm_chan_cctx_bind(chan, ectx->engn, NULL);

                if (refcount_dec_and_test(&ectx->uses))
                        nvkm_object_fini(ectx->object, false);
                mutex_unlock(&chan->cgrp->mutex);
        }

        return 0;
}

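/*
 * Runs before the proxied object's own init (oproxy init[0]).  The
 * double-checked refcount_inc_not_zero() pattern avoids taking the
 * channel-group mutex on the fast path, where the engine context is
 * already bound to the channel.
 */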
static int
nvkm_uchan_object_init_0(struct nvkm_oproxy *oproxy)
{
        struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
        struct nvkm_chan *chan = uobj->chan;
        struct nvkm_cctx *cctx = uobj->cctx;
        struct nvkm_ectx *ectx = cctx->vctx->ectx;
        int ret = 0;

        if (!ectx->object)
                return 0;

        /* Bind engine context to channel, if it hasn't been already. */
        if (!refcount_inc_not_zero(&cctx->uses)) {
                mutex_lock(&chan->cgrp->mutex);
                if (!refcount_inc_not_zero(&cctx->uses)) {
                        if (!refcount_inc_not_zero(&ectx->uses)) {
                                ret = nvkm_object_init(ectx->object);
                                if (ret == 0)
                                        refcount_set(&ectx->uses, 1);
                        }

                        if (ret == 0) {
                                nvkm_chan_cctx_bind(chan, ectx->engn, cctx);
                                refcount_set(&cctx->uses, 1);
                        }
                }
                mutex_unlock(&chan->cgrp->mutex);
        }

        return ret;
}

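/*
 * Runs after the proxied object has been destroyed (oproxy dtor[1]):
 * remove the object's RAMHT entry, if it had one, and release the
 * channel-context reference taken at allocation time.
 */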
static void
nvkm_uchan_object_dtor(struct nvkm_oproxy *oproxy)
{
        struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
        struct nvkm_engn *engn;

        if (!uobj->cctx)
                return;

        engn = uobj->cctx->vctx->ectx->engn;
        if (engn->func->ramht_del)
                engn->func->ramht_del(uobj->chan, uobj->hash);

        nvkm_chan_cctx_put(uobj->chan, &uobj->cctx);
}

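/*
 * Per core/oproxy.c, index [0] hooks run before the proxied object's own
 * method and index [1] hooks run after it, so the engine context is bound
 * before the HW object initialises, and torn down only once it is gone.
 */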
static const struct nvkm_oproxy_func
nvkm_uchan_object = {
        .dtor[1] = nvkm_uchan_object_dtor,
        .init[0] = nvkm_uchan_object_init_0,
        .fini[1] = nvkm_uchan_object_fini_1,
};

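/*
 * Allocate an engine object on the channel: look up the host engine state
 * (engn) for the target engine on the channel's runlist, take a channel
 * context reference, construct the HW object with the engine context as
 * its parent where one exists, and add it to the channel's RAMHT if the
 * engine requires that.
 */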
static int
nvkm_uchan_object_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
                      struct nvkm_object **pobject)
{
        struct nvkm_chan *chan = nvkm_uchan(oclass->parent)->chan;
        struct nvkm_cgrp *cgrp = chan->cgrp;
        struct nvkm_engn *engn;
        struct nvkm_uobj *uobj;
        int ret;

        /* Lookup host engine state for target engine. */
        engn = nvkm_runl_find_engn(engn, cgrp->runl, engn->engine == oclass->engine);
        if (WARN_ON(!engn))
                return -EINVAL;

        /* Allocate SW object. */
        if (!(uobj = kzalloc(sizeof(*uobj), GFP_KERNEL)))
                return -ENOMEM;

        nvkm_oproxy_ctor(&nvkm_uchan_object, oclass, &uobj->oproxy);
        uobj->chan = chan;
        *pobject = &uobj->oproxy.base;

        /* Ref. channel context for target engine. */
        ret = nvkm_chan_cctx_get(chan, engn, &uobj->cctx, oclass->client);
        if (ret)
                return ret;

        /* Allocate HW object. */
        ret = oclass->base.ctor(&(const struct nvkm_oclass) {
                .base = oclass->base,
                .engn = oclass->engn,
                .handle = oclass->handle,
                .object = oclass->object,
                .client = oclass->client,
                .parent = uobj->cctx->vctx->ectx->object ?: oclass->parent,
                .engine = engn->engine,
        }, argv, argc, &uobj->oproxy.object);
        if (ret)
                return ret;

        if (engn->func->ramht_add) {
                uobj->hash = engn->func->ramht_add(engn, uobj->oproxy.object, uobj->chan);
                if (uobj->hash < 0)
                        return uobj->hash;
        }

        return 0;
}

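/*
 * Enumerate the object classes available on this channel by walking every
 * engine on its runlist: 'index' is decremented by the number of classes
 * each engine exposes until it lands within one of them.  Engines provide
 * either a FIFO-specific sclass hook or a static sclass table.
 */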
static int
nvkm_uchan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
{
        struct nvkm_chan *chan = nvkm_uchan(object)->chan;
        struct nvkm_engn *engn;
        int ret, runq = 0;

        nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
                struct nvkm_engine *engine = engn->engine;
                int c = 0;

                /* Each runqueue, on runlists with multiple, has its own LCE. */
                if (engn->runl->func->runqs) {
                        if (engine->subdev.type == NVKM_ENGINE_CE) {
                                if (chan->runq != runq++)
                                        continue;
                        }
                }

                oclass->engine = engine;
                oclass->base.oclass = 0;

                if (engine->func->fifo.sclass) {
                        ret = engine->func->fifo.sclass(oclass, index);
                        if (oclass->base.oclass) {
                                if (!oclass->base.ctor)
                                        oclass->base.ctor = nvkm_object_new;
                                oclass->ctor = nvkm_uchan_object_new;
                                return 0;
                        }

                        index -= ret;
                        continue;
                }

                while (engine->func->sclass[c].oclass) {
                        if (c++ == index) {
                                oclass->base = engine->func->sclass[index];
                                if (!oclass->base.ctor)
                                        oclass->base.ctor = nvkm_object_new;
                                oclass->ctor = nvkm_uchan_object_new;
                                return 0;
                        }
                }

                index -= c;
        }

        return -EINVAL;
}

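/*
 * Map the channel's USERD (per-channel userspace control area) into the
 * client: BAR, per-channel offset and size all come from the channel
 * functions, so this fails on channels whose USERD isn't BAR-accessible.
 */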
static int
nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc,
               enum nvkm_object_map *type, u64 *addr, u64 *size)
{
        struct nvkm_chan *chan = nvkm_uchan(object)->chan;
        struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

        if (chan->func->userd->bar < 0)
                return -ENOSYS;

        *type = NVKM_OBJECT_MAP_IO;
        *addr = device->func->resource_addr(device, chan->func->userd->bar) +
                chan->func->userd->base + chan->userd.base;
        *size = chan->func->userd->size;
        return 0;
}

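/*
 * fini stops the channel: block further execution, remove it from the
 * runlist, and call the channel's unbind hook where one exists.  init
 * below reverses this, unless the channel has already errored.
 */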
static int
nvkm_uchan_fini(struct nvkm_object *object, bool suspend)
{
        struct nvkm_chan *chan = nvkm_uchan(object)->chan;

        nvkm_chan_block(chan);
        nvkm_chan_remove(chan, true);

        if (chan->func->unbind)
                chan->func->unbind(chan);

        return 0;
}

static int
nvkm_uchan_init(struct nvkm_object *object)
{
        struct nvkm_chan *chan = nvkm_uchan(object)->chan;

        if (atomic_read(&chan->errored))
                return 0;

        if (chan->func->bind)
                chan->func->bind(chan);

        nvkm_chan_allow(chan);
        nvkm_chan_insert(chan);
        return 0;
}

static void *
nvkm_uchan_dtor(struct nvkm_object *object)
{
        struct nvkm_uchan *uchan = nvkm_uchan(object);

        nvkm_chan_del(&uchan->chan);
        return uchan;
}

static const struct nvkm_object_func
nvkm_uchan = {
        .dtor = nvkm_uchan_dtor,
        .init = nvkm_uchan_init,
        .fini = nvkm_uchan_fini,
        .map = nvkm_uchan_map,
        .sclass = nvkm_uchan_sclass,
        .uevent = nvkm_uchan_uevent,
};

struct nvkm_chan *
nvkm_uchan_chan(struct nvkm_object *object)
{
        if (WARN_ON(object->func != &nvkm_uchan))
                return NULL;

        return nvkm_uchan(object)->chan;
}

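/*
 * Create a channel on behalf of a client: validate the versioned args,
 * resolve the runlist/VMM/ctxdma/USERD handles they reference, construct
 * the core channel, and fill the reply with the doorbell token, channel
 * ID, instance aperture and instance address the client needs to drive it.
 */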
int
nvkm_uchan_new(struct nvkm_fifo *fifo, struct nvkm_cgrp *cgrp, const struct nvkm_oclass *oclass,
               void *argv, u32 argc, struct nvkm_object **pobject)
{
        union nvif_chan_args *args = argv;
        struct nvkm_runl *runl;
        struct nvkm_vmm *vmm = NULL;
        struct nvkm_dmaobj *ctxdma = NULL;
        struct nvkm_memory *userd = NULL;
        struct nvkm_uchan *uchan;
        struct nvkm_chan *chan;
        int ret;

        if (argc < sizeof(args->v0) || args->v0.version != 0)
                return -ENOSYS;
        argc -= sizeof(args->v0);

        if (args->v0.namelen != argc)
                return -EINVAL;

        /* Lookup objects referenced in args. */
        runl = nvkm_runl_get(fifo, args->v0.runlist, 0);
        if (!runl)
                return -EINVAL;

        if (args->v0.vmm) {
                vmm = nvkm_uvmm_search(oclass->client, args->v0.vmm);
                if (IS_ERR(vmm))
                        return PTR_ERR(vmm);
        }

        if (args->v0.ctxdma) {
                ctxdma = nvkm_dmaobj_search(oclass->client, args->v0.ctxdma);
                if (IS_ERR(ctxdma)) {
                        ret = PTR_ERR(ctxdma);
                        goto done;
                }
        }

        if (args->v0.huserd) {
                userd = nvkm_umem_search(oclass->client, args->v0.huserd);
                if (IS_ERR(userd)) {
                        ret = PTR_ERR(userd);
                        userd = NULL; /* don't hand an ERR_PTR to nvkm_memory_unref() below */
                        goto done;
                }
        }

        /* Allocate channel. */
        if (!(uchan = kzalloc(sizeof(*uchan), GFP_KERNEL))) {
                ret = -ENOMEM;
                goto done;
        }

        nvkm_object_ctor(&nvkm_uchan, oclass, &uchan->object);
        *pobject = &uchan->object;

        ret = nvkm_chan_new_(fifo->func->chan.func, runl, args->v0.runq, cgrp, args->v0.name,
                             args->v0.priv != 0, args->v0.devm, vmm, ctxdma, args->v0.offset,
                             args->v0.length, userd, args->v0.ouserd, &uchan->chan);
        if (ret)
                goto done;

        chan = uchan->chan;

        /* Return channel info to caller. */
        if (chan->func->doorbell_handle)
                args->v0.token = chan->func->doorbell_handle(chan);
        else
                args->v0.token = ~0;

        args->v0.chid = chan->id;

        switch (nvkm_memory_target(chan->inst->memory)) {
        case NVKM_MEM_TARGET_INST: args->v0.aper = NVIF_CHAN_V0_INST_APER_INST; break;
        case NVKM_MEM_TARGET_VRAM: args->v0.aper = NVIF_CHAN_V0_INST_APER_VRAM; break;
        case NVKM_MEM_TARGET_HOST: args->v0.aper = NVIF_CHAN_V0_INST_APER_HOST; break;
        case NVKM_MEM_TARGET_NCOH: args->v0.aper = NVIF_CHAN_V0_INST_APER_NCOH; break;
        default:
                WARN_ON(1);
                ret = -EFAULT;
                break;
        }

        args->v0.inst = nvkm_memory_addr(chan->inst->memory);
done:
        nvkm_memory_unref(&userd);
        nvkm_vmm_unref(&vmm);
        return ret;
}