Lines matching full:fifo

44 	nvkm_runl_foreach(runl, engine->subdev.device->fifo) {
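The nvkm_runl_foreach() above is the driver's iterator over every runlist attached to the fifo; it is a thin wrapper around an intrusive-list walk of fifo->runls. A standalone model of that foreach idiom, with simplified stand-in types rather than the real nvkm structs, might look like:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    struct runl {
        int id;
        struct list_head head;   /* links into fifo->runls */
    };

    struct fifo { struct list_head runls; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Simplified analogue of nvkm_runl_foreach(): walk fifo->runls and
     * recover each struct runl from its embedded list node. */
    #define runl_foreach(runl, fifo) \
        for ((runl) = container_of((fifo)->runls.next, struct runl, head); \
             &(runl)->head != &(fifo)->runls; \
             (runl) = container_of((runl)->head.next, struct runl, head))

    int main(void)
    {
        struct fifo fifo;
        struct runl r0 = { .id = 0 }, r1 = { .id = 1 }, *runl;

        /* Splice two runlists onto the sentinel by hand for the demo. */
        fifo.runls.next = &r0.head; fifo.runls.prev = &r1.head;
        r0.head.prev = &fifo.runls; r0.head.next = &r1.head;
        r1.head.prev = &r0.head;    r1.head.next = &fifo.runls;

        runl_foreach(runl, &fifo)
            printf("runlist %d\n", runl->id);
        return 0;
    }

The embedded list node means a runlist is found from its link without any separate index structure, which is why the matches below can walk runls and runqs the same way.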
55 nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
57 return fifo->func->pause(fifo, flags);
61 nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
63 return fifo->func->start(fifo, flags);
67 nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
69 return fifo->func->mmu_fault->recover(fifo, info);
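nvkm_fifo_pause(), nvkm_fifo_start() and nvkm_fifo_fault() all share one shape: a thin exported wrapper that forwards straight to a per-chip hook held in fifo->func (returning a void call, as these do, is legal C and keeps each wrapper a single line). A minimal sketch of that dispatch pattern, using hypothetical stand-in types rather than the real nvkm_fifo_func:

    #include <stdio.h>

    struct fifo;

    /* Hypothetical stand-in for the per-chip hook table; the real
     * nvkm_fifo_func carries many more entries than this. */
    struct fifo_func {
        void (*pause)(struct fifo *, unsigned long *flags);
        void (*start)(struct fifo *, unsigned long *flags);
    };

    struct fifo {
        const struct fifo_func *func;
    };

    /* Thin wrapper: callers never learn which chip implementation runs. */
    static void fifo_pause(struct fifo *fifo, unsigned long *flags)
    {
        fifo->func->pause(fifo, flags);
    }

    static void gf100_pause(struct fifo *fifo, unsigned long *flags)
    {
        (void)fifo; (void)flags;
        printf("gf100: runlist processing paused\n");
    }

    int main(void)
    {
        const struct fifo_func gf100 = { .pause = gf100_pause };
        struct fifo fifo = { .func = &gf100 };
        unsigned long flags = 0;

        fifo_pause(&fifo, &flags);
        return 0;
    }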
76 struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
78 if (oclass->engn == &fifo->func->cgrp.user)
79 return nvkm_ucgrp_new(fifo, oclass, argv, argc, pobject);
81 if (oclass->engn == &fifo->func->chan.user)
82 return nvkm_uchan_new(fifo, NULL, oclass, argv, argc, pobject);
96 struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
97 const struct nvkm_fifo_func_cgrp *cgrp = &fifo->func->cgrp;
98 const struct nvkm_fifo_func_chan *chan = &fifo->func->chan;
105 oclass->engn = &fifo->func->cgrp.user;
115 oclass->engn = &fifo->func->chan.user;
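class_get and class_new form a pair: class_get publishes &fifo->func->cgrp.user and &fifo->func->chan.user as the enumerable user classes, and class_new (the earlier matches) tells them apart by comparing oclass->engn against those same addresses. The discriminator is pointer identity into the chip's function table, not a numeric type code. A hedged standalone sketch of that enumerate-then-dispatch handshake (illustrative names, not the real nvkm API):

    #include <stdio.h>

    struct user_class { const char *name; };

    /* Hypothetical per-chip table; the real nvkm_fifo_func pairs each
     * class definition with its constructor callbacks. */
    static const struct {
        struct { struct user_class user; } cgrp;
        struct { struct user_class user; } chan;
    } func = {
        .cgrp = { .user = { "cgrp" } },
        .chan = { .user = { "chan" } },
    };

    /* class_get analogue: map an enumeration index to a class definition. */
    static const struct user_class *class_get(int index)
    {
        switch (index) {
        case 0: return &func.cgrp.user;
        case 1: return &func.chan.user;
        default: return NULL;
        }
    }

    /* class_new analogue: dispatch on the address class_get handed out. */
    static int class_new(const struct user_class *engn)
    {
        if (engn == &func.cgrp.user) {
            printf("new channel-group object\n");
            return 0;
        }
        if (engn == &func.chan.user) {
            printf("new channel object\n");
            return 0;
        }
        return -19; /* -ENODEV */
    }

    int main(void)
    {
        for (int i = 0; class_get(i); i++)
            class_new(class_get(i));
        return 0;
    }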
127 struct nvkm_fifo *fifo = nvkm_fifo(engine);
130 nvkm_inth_block(&fifo->engine.subdev.inth);
132 nvkm_runl_foreach(runl, fifo)
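In the fini path, nvkm_inth_block() quiesces the engine's interrupt handler before the runlists are walked, so no IRQ can race the per-runlist teardown. A small sketch of that block-then-walk ordering (stub names; it assumes only the ordering visible above):

    #include <stdbool.h>
    #include <stdio.h>

    static bool irq_allowed = true;   /* stand-in for the inth state */

    static void inth_block(void) { irq_allowed = false; }

    static void runl_fini(int id)
    {
        /* Safe: the interrupt path can no longer touch this runlist. */
        printf("fini runlist %d (irq %s)\n", id, irq_allowed ? "on" : "off");
    }

    static void fifo_fini(void)
    {
        inth_block();               /* 1. quiesce the interrupt handler */
        for (int id = 0; id < 2; id++)
            runl_fini(id);          /* 2. then walk and tear down runlists */
    }

    int main(void) { fifo_fini(); return 0; }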
141 struct nvkm_fifo *fifo = nvkm_fifo(engine);
146 if (fifo->func->init_pbdmas) {
147 nvkm_runq_foreach(runq, fifo)
150 fifo->func->init_pbdmas(fifo, mask);
152 nvkm_runq_foreach(runq, fifo)
156 nvkm_runl_foreach(runl, fifo) {
161 if (fifo->func->init)
162 fifo->func->init(fifo);
164 nvkm_inth_allow(&fifo->engine.subdev.inth);
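Init mirrors fini: when the chip provides init_pbdmas, the runqs are walked once to accumulate a PBDMA mask, the mask is handed to the hook, the runqs and runlists are then initialized, and only afterwards is the interrupt handler re-allowed via nvkm_inth_allow(). The mask accumulation is worth a sketch (hypothetical runq layout; the real bit derivation may differ):

    #include <stdint.h>
    #include <stdio.h>

    struct runq { int id; };   /* stand-in; only the id matters here */

    /* Accumulate one bit per PBDMA unit, as init does before invoking
     * the optional init_pbdmas() hook. */
    static uint32_t runq_mask(const struct runq *runqs, int nr)
    {
        uint32_t mask = 0;
        for (int i = 0; i < nr; i++)
            mask |= 1u << runqs[i].id;
        return mask;
    }

    static void init_pbdmas(uint32_t mask)
    {
        printf("enable PBDMAs: mask=0x%x\n", mask);
    }

    int main(void)
    {
        struct runq runqs[] = { { 0 }, { 1 }, { 3 } };
        init_pbdmas(runq_mask(runqs, 3));   /* prints mask=0xb */
        return 0;
    }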
171 struct nvkm_fifo *fifo = nvkm_fifo(engine);
176 ret = nvkm_subdev_oneinit(&fifo->engine.subdev);
181 case NV_DEVICE_HOST_CHANNELS: *data = fifo->chid ? fifo->chid->nr : 0; return 0;
184 nvkm_runl_foreach(runl, fifo)
188 runl = nvkm_runl_get(fifo, *data, 0);
225 if (!fifo->chid) {
226 runl = nvkm_runl_get(fifo, *data, 0);
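These matches come from the device-info query: NV_DEVICE_HOST_CHANNELS reports fifo->chid->nr when channel IDs are allocated globally, and 0 when fifo->chid is NULL; in that latter (per-runlist) case a separate query looks the runlist up with nvkm_runl_get(fifo, *data, 0) and answers from it instead. A simplified model of the two paths (stand-in structs; the per-runlist field layout is an assumption):

    #include <stdio.h>

    struct chid { int nr; };
    struct runl { int id; struct chid chid; };  /* assumed per-runlist IDs */

    struct fifo {
        struct chid *chid;       /* global channel IDs, or NULL */
        struct runl runls[2];
    };

    /* NV_DEVICE_HOST_CHANNELS analogue: global count, else 0. */
    static int host_channels(const struct fifo *fifo)
    {
        return fifo->chid ? fifo->chid->nr : 0;
    }

    /* Per-runlist analogue: used when fifo->chid is NULL and the caller
     * selects a runlist, as nvkm_runl_get(fifo, *data, 0) does. */
    static int runlist_channels(const struct fifo *fifo, int runl_id)
    {
        return fifo->runls[runl_id].chid.nr;
    }

    int main(void)
    {
        struct fifo fifo = {
            .chid = NULL,
            .runls = { { 0, { 512 } }, { 1, { 128 } } },
        };
        printf("global: %d, runlist 1: %d\n",
               host_channels(&fifo), runlist_channels(&fifo, 1));
        return 0;
    }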
245 struct nvkm_fifo *fifo = nvkm_fifo(engine);
251 if (fifo->func->chid_nr) {
252 ret = fifo->func->chid_ctor(fifo, fifo->func->chid_nr(fifo));
258 if (fifo->func->runq_nr) {
259 for (nr = fifo->func->runq_nr(fifo), i = 0; i < nr; i++) {
260 if (!nvkm_runq_new(fifo, i))
266 ret = fifo->func->runl_ctor(fifo);
270 nvkm_runl_foreach(runl, fifo) {
278 if (fifo->func->intr) {
280 subdev, fifo->func->intr, &subdev->inth);
288 if (fifo->func->nonstall) {
289 if (fifo->func->nonstall_ctor) {
290 ret = fifo->func->nonstall_ctor(fifo);
299 ret = nvkm_event_init(fifo->func->nonstall, &fifo->engine.subdev, 1, ret,
300 &fifo->nonstall.event);
306 if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB) {
309 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, fifo->chid->nr *
310 fifo->func->chan.func->userd->size, 0, true,
311 &fifo->userd.mem);
315 ret = nvkm_vmm_get(bar1, 12, nvkm_memory_size(fifo->userd.mem), &fifo->userd.bar1);
319 ret = nvkm_memory_map(fifo->userd.mem, 0, bar1, fifo->userd.bar1, NULL, 0);
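When the chip keeps USERD in BAR1-visible framebuffer memory (userd->bar == NVKM_BAR1_FB), oneinit allocates one backing object sized fifo->chid->nr * userd->size, reserves a page-aligned BAR1 range (shift 12, i.e. 4 KiB pages), and maps the whole pool in one go, so each channel's USERD sits at a fixed offset inside a single mapping. A tiny userspace sketch of just the sizing/offset arithmetic (the 2048/512 numbers are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* Illustrative numbers: 2048 channels, 512-byte USERD each,
         * mirroring the nr * userd->size allocation in oneinit. */
        size_t nr = 2048, userd_size = 512;
        size_t bytes = nr * userd_size;

        /* 4 KiB alignment stands in for the shift-12 BAR1 reservation. */
        unsigned char *pool = aligned_alloc(4096, bytes);
        if (!pool)
            return 1;

        /* Channel N's USERD is a fixed offset into the single mapping. */
        size_t chan = 7;
        printf("pool %zu KiB; chan %zu USERD at offset 0x%zx\n",
               bytes >> 10, chan, chan * userd_size);
        free(pool);
        return 0;
    }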
336 struct nvkm_fifo *fifo = nvkm_fifo(engine);
340 if (fifo->userd.bar1)
341 nvkm_vmm_put(nvkm_bar_bar1_vmm(engine->subdev.device), &fifo->userd.bar1);
342 nvkm_memory_unref(&fifo->userd.mem);
344 list_for_each_entry_safe(runl, runt, &fifo->runls, head)
346 list_for_each_entry_safe(runq, rtmp, &fifo->runqs, head)
349 nvkm_chid_unref(&fifo->cgid);
350 nvkm_chid_unref(&fifo->chid);
352 nvkm_event_fini(&fifo->nonstall.event);
353 mutex_destroy(&fifo->mutex);
355 if (fifo->func->dtor)
356 fifo->func->dtor(fifo);
358 return fifo;
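The destructor is the constructor run backwards, and both list walks use list_for_each_entry_safe() because each runl/runq is unlinked and freed mid-walk; the _safe variant caches the next pointer before the current node dies. A standalone model of that idiom on a plain singly linked list:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int id; struct node *next; };

    int main(void)
    {
        /* Build a three-element list. */
        struct node *head = NULL;
        for (int i = 2; i >= 0; i--) {
            struct node *n = malloc(sizeof(*n));
            n->id = i; n->next = head; head = n;
        }

        /* "safe" walk: grab ->next before freeing the current node,
         * which is exactly why dtor uses list_for_each_entry_safe(). */
        for (struct node *n = head, *tmp; n; n = tmp) {
            tmp = n->next;
            printf("freeing node %d\n", n->id);
            free(n);
        }
        return 0;
    }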
376 struct nvkm_fifo *fifo;
378 if (!(fifo = *pfifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
381 fifo->func = func;
382 INIT_LIST_HEAD(&fifo->runqs);
383 INIT_LIST_HEAD(&fifo->runls);
388 fifo->timeout.chan_msec = 10000;
389 spin_lock_init(&fifo->lock);
390 mutex_init(&fifo->mutex);
392 return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
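nvkm_fifo_new_ is the stock allocate-and-wire constructor: zero-allocate the object, publish it through *pfifo, stash the per-chip function table, INIT_LIST_HEAD the runq/runl lists, set the 10000 ms channel-timeout default, initialize the lock and mutex, then chain to nvkm_engine_ctor for the base class. A hedged userspace analogue (calloc standing in for kzalloc; lists and locks elided):

    #include <stdio.h>
    #include <stdlib.h>

    struct fifo_func { const char *name; };

    struct fifo {
        const struct fifo_func *func;
        int timeout_chan_msec;
        /* runqs/runls list heads, spinlock, mutex elided in this sketch */
    };

    /* Analogue of nvkm_fifo_new_: allocate, wire the function table,
     * set defaults, hand the object back through *pfifo. */
    static int fifo_new(const struct fifo_func *func, struct fifo **pfifo)
    {
        struct fifo *fifo = calloc(1, sizeof(*fifo));  /* kzalloc analogue */
        if (!(*pfifo = fifo))
            return -12; /* -ENOMEM */

        fifo->func = func;
        fifo->timeout_chan_msec = 10000;  /* same default as the source */
        return 0;
    }

    int main(void)
    {
        static const struct fifo_func gf100 = { "gf100" };
        struct fifo *fifo;

        if (fifo_new(&gf100, &fifo) == 0) {
            printf("%s fifo, channel timeout %d ms\n",
                   fifo->func->name, fifo->timeout_chan_msec);
            free(fifo);
        }
        return 0;
    }

Assigning through *pfifo before the NULL check mirrors the kernel's `if (!(fifo = *pfifo = kzalloc(...)))` idiom, which guarantees the caller's pointer is valid (or NULL) even on the error path.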