xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c (revision 3a8c3400f3e74638bedd0d2410416aa8b794c0fd)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <engine/fifo.h>

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

struct gf100_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;
	u64 mask;

	struct {
		struct nvkm_gpuobj *mem[2];
		int active;
		wait_queue_head_t wait;
	} runlist;

	struct {
		struct nvkm_gpuobj *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

struct gf100_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gf100_fifo_chan {
	struct nvkm_fifo_chan base;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

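/* Rebuild and commit the runlist from software channel state.
 *
 * The runlist is double-buffered: an 8-byte entry (chid, then what
 * appears to be an entry-type/valid word, 0x00000004) is written into
 * the inactive buffer for every RUNNING channel, the buffers are
 * flipped, and the hardware is pointed at the new list via 0x002270
 * (base >> 12) and 0x002274 (entry count in the low bits).  Completion
 * is signalled by the pending bit in 0x00227c dropping; the runlist
 * interrupt handler turns that into a wake_up on runlist.wait.
 */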
static void
gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	nvkm_kmap(cur);
	for (i = 0, p = 0; i < 128; i++) {
		struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING) {
			nvkm_wo32(cur, p + 0, i);
			nvkm_wo32(cur, p + 4, 0x00000004);
			p += 8;
		}
	}
	bar->flush(bar);
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, cur->addr >> 12);
	nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));

	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist update timeout\n");
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

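/* Bind an engine context into a channel's instance block.
 *
 * Each engine owns a fixed 8-byte slot in the instance block (0x0210
 * for GR, 0x0230/0x0240 for the copy engines, and so on, per the
 * switch below).  The context is mapped into the channel's VM on first
 * attach, then the low word of its virtual address (bit 2 set,
 * presumably a valid flag) and the high word are written to that slot.
 */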
static int
gf100_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	nvkm_done(engn);
	return 0;
}

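/* Unbind an engine context, kicking the channel off the engine first.
 *
 * Writing the chid to 0x002634 asks PFIFO to preempt the channel; the
 * register reads back as the chid once the kick has completed.  If the
 * kick times out on suspend the detach fails with -EBUSY, since
 * tearing down a context that may still be resident would be unsafe.
 */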
static int
gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)parent->engine;
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct gf100_fifo_chan *chan = (void *)parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, 0x00000000);
	nvkm_wo32(engn, addr + 0x04, 0x00000000);
	bar->flush(bar);
	nvkm_done(engn);
	return 0;
}

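/* Create a GPFIFO channel.
 *
 * Each channel owns a 0x1000-byte slice of fifo->user.mem (USERD),
 * indexed by chid; it is zeroed here and its address written to RAMFC
 * at 0x08/0x0c.  The caller-supplied indirect buffer is described at
 * 0x48/0x4c: address low/high, with log2(entry count) in bits 16+ of
 * the high word.  The remaining RAMFC writes are fixed magic values;
 * the two trailing ones are noted as shadowing 0x002310 and 0x002350.
 */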
static int
gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct fermi_channel_gpfifo_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo *fifo = (void *)engine;
	struct gf100_fifo_base *base = (void *)parent;
	struct gf100_fifo_chan *chan;
	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d "
				   "ioffset %016llx ilength %08x\n",
			   args->v0.version, args->v0.ioffset,
			   args->v0.ilength);
		if (args->v0.vm)
			return -ENOENT;
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x1000, 0,
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_CE1) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
	nv_parent(chan)->context_detach = gf100_fifo_context_detach;

	usermem = chan->base.chid * 0x1000;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);

	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x10, 0x0000face);
	nvkm_wo32(ramfc, 0x30, 0xfffff902);
	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(ramfc, 0x54, 0x00000002);
	nvkm_wo32(ramfc, 0x84, 0x20400000);
	nvkm_wo32(ramfc, 0x94, 0x30000001);
	nvkm_wo32(ramfc, 0x9c, 0x00000100);
	nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
	nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
	nvkm_wo32(ramfc, 0xac, 0x0000001f);
	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	nvkm_done(ramfc);
	return 0;
}

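/* Channel init: 0x003000 + chid*8 takes the instance block address
 * (bit 31 appears to mark the entry valid), 0x003004 + chid*8 holds
 * the enable/state bits.  A newly started channel is flipped to
 * RUNNING and added to the runlist.
 */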
static int
gf100_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gf100_fifo *fifo = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);

	if (chan->state == STOPPED) {
		chan->state = RUNNING;
		nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
		gf100_fifo_runlist_update(fifo);
	}

	return 0;
}

static void gf100_fifo_intr_engine(struct gf100_fifo *fifo);

static int
gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gf100_fifo *fifo = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	if (chan->state == RUNNING) {
		chan->state = STOPPED;
		nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
		gf100_fifo_runlist_update(fifo);
	}

	gf100_fifo_intr_engine(fifo);

	nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
gf100_fifo_ofuncs = {
	.ctor = gf100_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gf100_fifo_chan_init,
	.fini = gf100_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
gf100_fifo_sclass[] = {
	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

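/* Channel context (instance block) setup.
 *
 * A 4KiB instance block is allocated per channel with a 64KiB page
 * directory behind it.  The PGD address goes in at 0x0200/0x0204,
 * followed by what appears to be the 40-bit VM limit at 0x0208/0x020c,
 * after which the channel VM is wired up to the client's address
 * space.
 */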
static int
gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gf100_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
				       NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	nvkm_kmap(&base->base.gpuobj);
	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
	nvkm_done(&base->base.gpuobj);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
gf100_fifo_context_dtor(struct nvkm_object *object)
{
	struct gf100_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

static struct nvkm_oclass
gf100_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_context_ctor,
		.dtor = gf100_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case 0: engn = NVDEV_ENGINE_GR; break;
	case 1: engn = NVDEV_ENGINE_MSVLD; break;
	case 2: engn = NVDEV_ENGINE_MSPPP; break;
	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
	case 4: engn = NVDEV_ENGINE_CE0; break;
	case 5: engn = NVDEV_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_engine(fifo, engn);
}

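/* Deferred engine recovery, run from the "fault" work item.
 *
 * gf100_fifo_recover() accumulates faulting engines in fifo->mask
 * under the fifo lock; this handler snapshots and clears that mask,
 * blocks the affected engines from being scheduled (0x002630), runs a
 * fini/init cycle on each one, rebuilds the runlist without the killed
 * channels, and re-enables scheduling.  0x00262c presumably
 * acknowledges the per-engine fault state before the unblock.
 */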
static void
gf100_fifo_recover_work(struct work_struct *work)
{
	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_object *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
		if ((engine = (void *)nvkm_engine(fifo, engn))) {
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
	}

	gf100_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   engine->subdev.name, chid);

	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

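/* Try to handle a method as a software method.
 *
 * PBDMA raises EMPTY_SUBC/ILLEGAL_MTHD for methods the hardware does
 * not recognise; if the channel has a GF100 SW class object bound,
 * the method is forwarded to it so the interrupt can be suppressed.
 * Returns 0 if the method was consumed, -EINVAL otherwise.
 */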
static int
gf100_fifo_swmthd(struct gf100_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gf100_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

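/* SCHED_ERROR code 0x0a: context-switch timeout.
 *
 * Each of the six engine status registers (0x002640 + engn*4) is
 * inspected; the bit layout is only partially understood (note the
 * "maybe?" on the save bit below).  An engine that is busy with both
 * unknown bits set is assumed to be stuck on the indicated channel,
 * and recovery is triggered for that engine/channel pair.
 */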
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < 6; engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gf100_fifo_engine(fifo, engn)))
				continue;
			gf100_fifo_recover(fifo, engine, chan);
		}
	}
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PDAEMON" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

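/* Decode and report an MMU fault from one of the fault units.
 *
 * Each unit has a four-register block at 0x002800 + unit*0x10:
 * faulting instance, virtual address low/high, and a status word
 * holding the GPC, client, read/write direction, hub/GPC flag and
 * fault reason, decoded via the enum tables above.  BAR1/BAR3/IFB
 * faults are acknowledged with a no-op read-modify-write of their
 * init registers (which presumably retriggers the unit); faults on a
 * real engine are matched back to a channel through the engine
 * context, and that channel is handed to gf100_fifo_recover().
 */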
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case FERMI_CHANNEL_GPFIFO:
			gf100_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

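/* Handle a PBDMA interrupt.
 *
 * The per-unit register block has stride 0x2000: interrupt status
 * (0x040108), the triggering method (0x0400c0: subchannel in bits
 * 16-18, method in bits 2-13), its data (0x0400c4) and the active
 * chid (0x040120).  EMPTY_SUBC (bit 23) is first offered to the SW
 * class via gf100_fifo_swmthd(); whatever remains is logged before
 * the method and interrupt status are acknowledged.
 */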
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gf100_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x\n",
				   engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

static void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);
	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}

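/* Top-level PFIFO interrupt handler.
 *
 * 0x002100 holds interrupt status, masked by the enables in 0x002140.
 * Bits handled below: 0x00000001 (unknown, info in 0x00252c),
 * 0x00000100 (scheduler), 0x00010000 and 0x01000000 (unknown),
 * 0x10000000 (MMU fault, per-unit mask at 0x00259c), 0x20000000
 * (PBDMA, per-unit mask at 0x0025a0), 0x40000000 (runlist) and
 * 0x80000000 (engine/non-stall).  Anything left over is reported
 * once and masked off so it cannot storm.
 */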
static void
gf100_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gf100_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static const struct nvkm_event_func
gf100_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gf100_fifo_uevent_init,
	.fini = gf100_fifo_uevent_fini,
};

static int
gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gf100_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0,
			      &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0,
			      &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 0x1000, 0x1000, 0,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gf100_fifo_intr;
	nv_engine(fifo)->cclass = &gf100_fifo_cclass;
	nv_engine(fifo)->sclass = gf100_fifo_sclass;
	return 0;
}

static void
gf100_fifo_dtor(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;

	nvkm_gpuobj_unmap(&fifo->user.bar);
	nvkm_gpuobj_ref(NULL, &fifo->user.mem);
	nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[0]);
	nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[1]);

	nvkm_fifo_destroy(&fifo->base);
}

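/* Bring up PFIFO: enable every PBDMA unit (0x000204/0x002204) and
 * count how many the hardware reports back (spoon_nr), statically
 * assign engines to PBDMAs on parts with three or more units, clear
 * and unmask the per-unit and top-level interrupts, and point
 * 0x002254 at the BAR mapping of the USERD buffer (bit 28 presumably
 * being an enable).
 */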
static int
gf100_fifo_init(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x000204, 0xffffffff);
	nvkm_wr32(device, 0x002204, 0xffffffff);

	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* assign engines to PBDMAs */
	if (fifo->spoon_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
	return 0;
}

struct nvkm_oclass *
gf100_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_ctor,
		.dtor = gf100_fifo_dtor,
		.init = gf100_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};