xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c (revision 3a8c3400f3e74638bedd0d2410416aa8b794c0fd)
1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "nv40.h"
25 #include "regs.h"
26 
27 #include <core/client.h>
28 #include <core/handle.h>
29 #include <subdev/fb.h>
30 #include <subdev/timer.h>
31 #include <engine/fifo.h>
32 
/*
 * Private PGRAPH engine state.  The nvkm_gr base must remain the first
 * member: functions below cast nvkm_engine/nvkm_subdev pointers directly
 * to struct nv40_gr via (void *) casts.
 */
struct nv40_gr {
	struct nvkm_gr base;
	u32 size;	/* context image size, filled in by nv40_grctx_init() */
};

/* Per-channel PGRAPH context; no extra state beyond the common base. */
struct nv40_gr_chan {
	struct nvkm_gr_chan base;
};
41 
42 static u64
43 nv40_gr_units(struct nvkm_gr *gr)
44 {
45 	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
46 }
47 
48 /*******************************************************************************
49  * Graphics object classes
50  ******************************************************************************/
51 
52 static int
53 nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
54 		    struct nvkm_oclass *oclass, void *data, u32 size,
55 		    struct nvkm_object **pobject)
56 {
57 	struct nvkm_gpuobj *obj;
58 	int ret;
59 
60 	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
61 				 20, 16, 0, &obj);
62 	*pobject = nv_object(obj);
63 	if (ret)
64 		return ret;
65 
66 	nvkm_kmap(obj);
67 	nvkm_wo32(obj, 0x00, nv_mclass(obj));
68 	nvkm_wo32(obj, 0x04, 0x00000000);
69 	nvkm_wo32(obj, 0x08, 0x00000000);
70 #ifdef __BIG_ENDIAN
71 	nvkm_mo32(obj, 0x08, 0x01000000, 0x01000000);
72 #endif
73 	nvkm_wo32(obj, 0x0c, 0x00000000);
74 	nvkm_wo32(obj, 0x10, 0x00000000);
75 	nvkm_done(obj);
76 	return 0;
77 }
78 
/*
 * Object funcs shared by every graphics object class on this engine:
 * only the constructor is NV40-specific, everything else falls through
 * to the generic gpuobj implementations.
 */
static struct nvkm_ofuncs
nv40_gr_ofuncs = {
	.ctor = nv40_gr_object_ctor,
	.dtor = _nvkm_gpuobj_dtor,
	.init = _nvkm_gpuobj_init,
	.fini = _nvkm_gpuobj_fini,
	.rd32 = _nvkm_gpuobj_rd32,
	.wr32 = _nvkm_gpuobj_wr32,
};
88 
/* Object classes exposed on NV40-family chips (3D class 0x4097, curie). */
static struct nvkm_oclass
nv40_gr_sclass[] = {
	{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
	{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
	{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
	{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
	{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
	{ 0x4097, &nv40_gr_ofuncs, NULL }, /* curie */
	{},
};
109 
/*
 * Object classes exposed on NV44-family chips.  Identical to
 * nv40_gr_sclass except for the 3D class id (0x4497 instead of 0x4097).
 */
static struct nvkm_oclass
nv44_gr_sclass[] = {
	{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
	{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
	{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
	{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
	{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
	{ 0x4497, &nv40_gr_ofuncs, NULL }, /* curie */
	{},
};
130 
131 /*******************************************************************************
132  * PGRAPH context
133  ******************************************************************************/
134 
135 static int
136 nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
137 		     struct nvkm_oclass *oclass, void *data, u32 size,
138 		     struct nvkm_object **pobject)
139 {
140 	struct nv40_gr *gr = (void *)engine;
141 	struct nv40_gr_chan *chan;
142 	int ret;
143 
144 	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, gr->size,
145 				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
146 	*pobject = nv_object(chan);
147 	if (ret)
148 		return ret;
149 
150 	nv40_grctx_fill(nv_device(gr), nv_gpuobj(chan));
151 	nvkm_wo32(&chan->base.base.gpuobj, 0x00000, nv_gpuobj(chan)->addr >> 4);
152 	return 0;
153 }
154 
/*
 * Tear down a channel's PGRAPH context.  If this context is the one the
 * hardware currently references, force a context save first (only when
 * suspending) and then clear bit 24 of the context pointer registers so
 * PGRAPH no longer considers the dying context image valid.
 */
static int
nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
{
	struct nv40_gr *gr = (void *)object->engine;
	struct nv40_gr_chan *chan = (void *)object;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	/* instance address as PGRAPH stores it; bit 24 presumably "valid" */
	u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
	int ret = 0;

	/* block PFIFO access to PGRAPH while fiddling with context state */
	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);

	if (nvkm_rd32(device, 0x40032c) == inst) {
		/* 0x40032c matches: our context is resident on the HW */
		if (suspend) {
			/* trigger a ctxprog context save to instmem ... */
			nvkm_wr32(device, 0x400720, 0x00000000);
			nvkm_wr32(device, 0x400784, inst);
			nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
			nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
			/* ... and wait up to 2s for it to go idle */
			if (nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
					break;
			) < 0) {
				/* 0x400308: last ctxprog instruction, for diagnosis */
				u32 insn = nvkm_rd32(device, 0x400308);
				nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
				ret = -EBUSY;
			}
		}

		/* invalidate the current-context pointer */
		nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
	}

	/* likewise 0x400330 — presumably the next/pending context pointer */
	if (nvkm_rd32(device, 0x400330) == inst)
		nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);

	/* restore PFIFO access */
	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
	return ret;
}
192 
/*
 * Engine context class: generic gr-context handling everywhere except
 * ctor (builds the NV40 context image) and fini (needs a HW ctx unload).
 */
static struct nvkm_oclass
nv40_gr_cclass = {
	.handle = NV_ENGCTX(GR, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv40_gr_context_ctor,
		.dtor = _nvkm_gr_context_dtor,
		.init = _nvkm_gr_context_init,
		.fini = nv40_gr_context_fini,
		.rd32 = _nvkm_gr_context_rd32,
		.wr32 = _nvkm_gr_context_wr32,
	},
};
205 
206 /*******************************************************************************
207  * PGRAPH engine/subdev functions
208  ******************************************************************************/
209 
/*
 * Mirror FB tile region @i's setup (pitch/limit/address, plus the zcomp
 * value where the chipset supports it) into PGRAPH's copy of the tile
 * registers.  PFIFO is paused and PGRAPH idled around the update so the
 * registers aren't reprogrammed mid-operation.  Register layout varies
 * by chipset family, hence the nested switches.
 */
static void
nv40_gr_tile_prog(struct nvkm_engine *engine, int i)
{
	struct nv40_gr *gr = (void *)engine;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	struct nvkm_fb_tile *tile = &device->fb->tile.region[i];
	unsigned long flags;

	/* stop command submission, then wait for PGRAPH to drain */
	fifo->pause(fifo, &flags);
	nv04_gr_idle(gr);

	switch (nv_device(gr)->chipset) {
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x4e:
		/* these chips carry both the NV20- and NV40-layout sets */
		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
		switch (nv_device(gr)->chipset) {
		case 0x40:
		case 0x45:
			nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
			nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		case 0x41:
		case 0x42:
		case 0x43:
			/* NV41+ moved the zcomp registers */
			nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
			nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		default:
			break;
		}
		break;
	case 0x44:
	case 0x4a:
		/* only the NV20-layout set, no zcomp */
		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
		break;
	case 0x46:
	case 0x4c:
	case 0x47:
	case 0x49:
	case 0x4b:
	case 0x63:
	case 0x67:
	case 0x68:
		/* NV47-layout primary set plus the NV40-layout mirror */
		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
		switch (nv_device(gr)->chipset) {
		case 0x47:
		case 0x49:
		case 0x4b:
			nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
			nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	fifo->start(fifo, &flags);
}
288 
/*
 * PGRAPH interrupt handler: decode the trap state, give registered
 * software method handlers a chance to consume ILLEGAL_MTHD errors,
 * acknowledge the interrupt, and log anything left unhandled.
 */
static void
nv40_gr_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fifo *fifo = nvkm_fifo(subdev);
	struct nvkm_engine *engine = nv_engine(subdev);
	struct nvkm_object *engctx;
	struct nvkm_handle *handle = NULL;
	struct nv40_gr *gr = (void *)subdev;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;	/* active ctx */
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 subc = (addr & 0x00070000) >> 16;	/* subchannel */
	u32 mthd = (addr & 0x00001ffc);		/* method offset */
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
	u32 show = stat;	/* bits still unhandled; logged below */
	char msg[128], src[128], sta[128];
	int chid;

	/* map the trapping context back to its channel for reporting */
	engctx = nvkm_engctx_get(engine, inst);
	chid   = fifo->chid(fifo, engctx);

	if (stat & NV_PGRAPH_INTR_ERROR) {
		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
			/* if a SW method handler consumes it, don't log it */
			handle = nvkm_handle_get_class(engctx, class);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~NV_PGRAPH_INTR_ERROR;
			nvkm_handle_put(handle);
		}

		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
			/* no-op read/modify/write of 0x402000 — NOTE(review):
			 * presumably kicks/acks the DMA unit; intent is not
			 * evident from this file */
			nvkm_mask(device, 0x402000, 0, 0);
		}
	}

	/* ack every reported bit, then re-enable fifo access to PGRAPH */
	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		/* pretty-print the remaining bits for the error message */
		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
				   "nstatus %08x [%s] ch %d [%08x %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   show, msg, nsource, src, nstatus, sta, chid,
			   inst << 4, nvkm_client_name(engctx), subc,
			   class, mthd, data);
	}

	nvkm_engctx_put(engctx);
}
344 
345 static int
346 nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
347 	     struct nvkm_oclass *oclass, void *data, u32 size,
348 	     struct nvkm_object **pobject)
349 {
350 	struct nv40_gr *gr;
351 	int ret;
352 
353 	ret = nvkm_gr_create(parent, engine, oclass, true, &gr);
354 	*pobject = nv_object(gr);
355 	if (ret)
356 		return ret;
357 
358 	nv_subdev(gr)->unit = 0x00001000;
359 	nv_subdev(gr)->intr = nv40_gr_intr;
360 	nv_engine(gr)->cclass = &nv40_gr_cclass;
361 	if (nv44_gr_class(gr))
362 		nv_engine(gr)->sclass = nv44_gr_sclass;
363 	else
364 		nv_engine(gr)->sclass = nv40_gr_sclass;
365 	nv_engine(gr)->tile_prog = nv40_gr_tile_prog;
366 
367 	gr->base.units = nv40_gr_units;
368 	return 0;
369 }
370 
/*
 * Bring PGRAPH up: generate/upload the context program, clear and
 * re-enable interrupts, program a long list of chipset-specific debug
 * and tuning registers, install the tile regions, and mirror the RAM
 * configuration into PGRAPH.  Register write order follows the
 * original bring-up sequence and should not be rearranged casually.
 */
static int
nv40_gr_init(struct nvkm_object *object)
{
	struct nvkm_engine *engine = nv_engine(object);
	struct nv40_gr *gr = (void *)engine;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fb *fb = device->fb;
	int ret, i, j;
	u32 vramsz;

	ret = nvkm_gr_init(&gr->base);
	if (ret)
		return ret;

	/* generate and upload context program */
	ret = nv40_grctx_init(nv_device(gr), &gr->size);
	if (ret)
		return ret;

	/* No context present currently */
	nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	/* ack any stale interrupts, then enable all sources */
	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	/* write the index of the lowest set bit of the unit mask in
	 * 0x1540 (i.e. ffs(j) - 1) to 0x405000 */
	j = nvkm_rd32(device, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nvkm_wr32(device, 0x405000, i);
	}

	/* magic tuning values; register offsets moved after NV40 */
	if (nv_device(gr)->chipset == 0x40) {
		nvkm_wr32(device, 0x4009b0, 0x83280fff);
		nvkm_wr32(device, 0x4009b4, 0x000000a0);
	} else {
		nvkm_wr32(device, 0x400820, 0x83280eff);
		nvkm_wr32(device, 0x400824, 0x000000a0);
	}

	switch (nv_device(gr)->chipset) {
	case 0x40:
	case 0x45:
		nvkm_wr32(device, 0x4009b8, 0x0078e366);
		nvkm_wr32(device, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nvkm_wr32(device, 0x400828, 0x007596ff);
		nvkm_wr32(device, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nvkm_wr32(device, 0x400828, 0x0072cb77);
		nvkm_wr32(device, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nvkm_wr32(device, 0x400860, 0);
		nvkm_wr32(device, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nvkm_wr32(device, 0x400828, 0x07830610);
		nvkm_wr32(device, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nvkm_wr32(device, 0x400b38, 0x2ffff800);
	nvkm_wr32(device, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (nv_device(gr)->chipset) {
	case 0x44:
	case 0x4a:
		nvkm_wr32(device, 0x400bc4, 0x1003d888);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nvkm_wr32(device, 0x400bc4, 0x0000e024);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nvkm_wr32(device, 0x400bc4, 0x1003d888);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < fb->tile.regions; i++)
		engine->tile_prog(engine, i);

	/* begin RAM config */
	/* size of BAR1 (the VRAM aperture), minus one */
	vramsz = nv_device_resource_len(nv_device(gr), 1) - 1;
	switch (nv_device(gr)->chipset) {
	case 0x40:
		/* mirror PFB config (0x100200/0x100204) into PGRAPH */
		nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x400820, 0);
		nvkm_wr32(device, 0x400824, 0);
		nvkm_wr32(device, 0x400864, vramsz);
		nvkm_wr32(device, 0x400868, vramsz);
		break;
	default:
		/* later chips moved the PFB mirror registers around */
		switch (nv_device(gr)->chipset) {
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x45:
		case 0x4e:
		case 0x44:
		case 0x4a:
			nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
			nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
			break;
		default:
			nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
			nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
			break;
		}
		nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x400840, 0);
		nvkm_wr32(device, 0x400844, 0);
		nvkm_wr32(device, 0x4008A0, vramsz);
		nvkm_wr32(device, 0x4008A4, vramsz);
		break;
	}

	return 0;
}
524 
/*
 * Public engine class for NV40-family PGRAPH; referenced by the device
 * tables elsewhere in the driver.  Only ctor/init are NV40-specific.
 */
struct nvkm_oclass
nv40_gr_oclass = {
	.handle = NV_ENGINE(GR, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv40_gr_ctor,
		.dtor = _nvkm_gr_dtor,
		.init = nv40_gr_init,
		.fini = _nvkm_gr_fini,
	},
};
535