xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c (revision 4de93a086eb0315f0bd8e1d6da40186842670b57)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
#include <subdev/fb.h>
#include <subdev/instmem/nv04.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

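/*
 * Per-channel RAMFC layout: each entry maps one PFIFO register (or raw
 * register address) to a field of the channel's RAMFC record, giving the
 * bit width, the shifts on either side and the byte offset within RAMFC,
 * so CACHE1 state can be copied between the registers and a channel's
 * RAMFC image when channels are swapped in and out.
 */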
static struct ramfc_desc
nv40_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 28,  0, 0x18,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{  2, 28, 0x18, 28, 0x002058 },
	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x20,  0, NV04_PFIFO_CACHE1_PULL1 },
	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
	{ 32,  0, 0x34,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
	{ 32,  0, 0x38,  0, NV40_PFIFO_GRCTX_INSTANCE },
	{ 17,  0, 0x3c,  0, NV04_PFIFO_DMA_TIMESLICE },
	{ 32,  0, 0x40,  0, 0x0032e4 },
	{ 32,  0, 0x44,  0, 0x0032e8 },
	{ 32,  0, 0x4c,  0, 0x002088 },
	{ 32,  0, 0x50,  0, 0x003300 },
	{ 32,  0, 0x54,  0, 0x00330c },
	{}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

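/*
 * Bind an object into the channel's RAMHT.  The context word packs the
 * object's instance address (>>4) into the low bits, an engine selector
 * into bits 20-21 (0 = SW/DMAOBJ, 1 = GR, 2 = MPEG) and the channel id
 * from bit 23, and the (handle, context) pair is then hashed into RAMHT.
 */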
static int
nv40_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW:
		context |= 0x00000000;
		break;
	case NVDEV_ENGINE_GR:
		context |= 0x00100000;
		break;
	case NVDEV_ENGINE_MPEG:
		context |= 0x00200000;
		break;
	default:
		return -EINVAL;
	}

	context |= chid << 23;

	mutex_lock(&nv_subdev(fifo)->mutex);
	ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context);
	mutex_unlock(&nv_subdev(fifo)->mutex);
	return ret;
}

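/*
 * Attach an engine context to the channel.  The context's instance
 * address is written into the engine's slot in the channel's RAMFC
 * (GR at +0x38, MPEG at +0x54) and, if the channel is currently
 * resident in CACHE1 (0x3204), into the live register (0x32e0/0x330c)
 * as well, with PFIFO caches held off via 0x2500 so a context switch
 * cannot race with the update.
 */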
static int
nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	unsigned long flags;
	u32 reg, ctx;

	switch (nv_engidx(engctx->engine)) {
	case NVDEV_ENGINE_SW:
		return 0;
	case NVDEV_ENGINE_GR:
		reg = 0x32e0;
		ctx = 0x38;
		break;
	case NVDEV_ENGINE_MPEG:
		reg = 0x330c;
		ctx = 0x54;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
	nv_mask(fifo, 0x002500, 0x00000001, 0x00000000);

	if ((nv_rd32(fifo, 0x003204) & fifo->base.max) == chan->base.chid)
		nv_wr32(fifo, reg, nv_engctx(engctx)->addr);
	nv_wo32(fifo->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);

	nv_mask(fifo, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

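/*
 * Detach an engine context: the reverse of nv40_fifo_context_attach,
 * clearing the same RAMFC slot and, if the channel is resident, the
 * live register.
 */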
static int
nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			 struct nvkm_object *engctx)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	unsigned long flags;
	u32 reg, ctx;

	switch (nv_engidx(engctx->engine)) {
	case NVDEV_ENGINE_SW:
		return 0;
	case NVDEV_ENGINE_GR:
		reg = 0x32e0;
		ctx = 0x38;
		break;
	case NVDEV_ENGINE_MPEG:
		reg = 0x330c;
		ctx = 0x54;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	nv_mask(fifo, 0x002500, 0x00000001, 0x00000000);

	if ((nv_rd32(fifo, 0x003204) & fifo->base.max) == chan->base.chid)
		nv_wr32(fifo, reg, 0x00000000);
	nv_wo32(fifo->ramfc, chan->ramfc + ctx, 0x00000000);

	nv_mask(fifo, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

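/*
 * Create a DMA channel: unpack the NV03_CHANNEL_DMA arguments and
 * allocate a channel with access to the SW, DMAOBJ, GR and MPEG
 * engines.  Each channel owns a 128-byte RAMFC record.
 */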
static int
nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nv04_fifo_chan *chan;
	int ret;

	nv_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				 "offset %016llx\n", args->v0.version,
			 args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x1000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = nv40_fifo_context_attach;
	nv_parent(chan)->context_detach = nv40_fifo_context_detach;
	nv_parent(chan)->object_attach = nv40_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	chan->ramfc = chan->base.chid * 128;

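	/* seed the channel's RAMFC: DMA put/get at the requested offset,
	 * the pushbuf's instance address, default fetch parameters and
	 * the DMA timeslice.
	 */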
	nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x18, 0x30000000 |
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
	return 0;
}

static struct nvkm_ofuncs
nv40_fifo_ofuncs = {
	.ctor = nv40_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv40_fifo_sclass[] = {
	{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

static struct nvkm_oclass
nv40_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

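/*
 * PFIFO constructor: 32 channels (0..31), sharing the RAMHT, RAMRO and
 * RAMFC objects carved out by NV04 instmem; interrupt handling and
 * pause/start come from the NV04 implementation.
 */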
static int
nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_instmem *imem = nv04_instmem(parent);
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	nvkm_ramht_ref(imem->ramht, &fifo->ramht);
	nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
	nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv40_fifo_cclass;
	nv_engine(fifo)->sclass = nv40_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv40_ramfc;
	return 0;
}

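/*
 * Bring up PFIFO: point the hardware at RAMHT and RAMRO, tell it where
 * RAMFC lives for this chipset, then enable CACHE1 and interrupts.
 */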
static int
nv40_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	struct nvkm_fb *fb = nvkm_fb(object);
	int ret;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nv_wr32(fifo, 0x002040, 0x000000ff);
	nv_wr32(fifo, 0x002044, 0x2101ffff);
	nv_wr32(fifo, 0x002058, 0x00000001);

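	/* RAMHT base address, size encoding and search depth; RAMRO base */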
	nv_wr32(fifo, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((fifo->ramht->bits - 9) << 16) |
				        (fifo->ramht->gpuobj.addr >> 8));
	nv_wr32(fifo, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);

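	/* RAMFC location is chipset-dependent: the parts listed below use
	 * a fixed window (NV47/49/4B additionally set 0x2230), while other
	 * chipsets are programmed with RAMFC's address in VRAM (end of
	 * memory minus 512KiB, plus RAMFC's instmem offset).
	 */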
	switch (nv_device(fifo)->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		nv_wr32(fifo, 0x002230, 0x00000001);
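		/* fall through */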
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x48:
		nv_wr32(fifo, 0x002220, 0x00030002);
		break;
	default:
		nv_wr32(fifo, 0x002230, 0x00000000);
		nv_wr32(fifo, 0x002220, ((fb->ram->size - 512 * 1024 +
					 fifo->ramfc->addr) >> 16) |
					0x00030000);
		break;
	}

	nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

	nv_wr32(fifo, NV03_PFIFO_INTR_0, 0xffffffff);
	nv_wr32(fifo, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1);
	nv_wr32(fifo, NV03_PFIFO_CACHES, 1);
	return 0;
}

struct nvkm_oclass *
nv40_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv40_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv40_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};