/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "ctxgf100.h"
#include "fuc/os.h"

#include <core/client.h>
#include <core/device.h>
#include <core/handle.h>
#include <core/option.h>
#include <engine/fifo.h>
#include <subdev/fb.h>
#include <subdev/mc.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

/*******************************************************************************
 * Zero Bandwidth Clear
 ******************************************************************************/

static void
gf100_gr_zbc_clear_color(struct gf100_gr_priv *priv, int zbc)
{
	if (priv->zbc_color[zbc].format) {
		nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]);
		nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]);
		nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]);
		nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]);
	}
	nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format);
	nv_wr32(priv, 0x405820, zbc);
	nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
}

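/* Find (or allocate) a ZBC table slot for a clear value.
 *
 * The _get() helpers below scan the slot range advertised by the LTC
 * subdev (ltc->zbc_min..ltc->zbc_max): an entry with the same format
 * and ds value(s) is reused, otherwise the first free slot is claimed,
 * the LTC copy is updated and the new entry is written out through the
 * _clear_*() helpers.  -ENOSPC is returned when the table is full; a
 * ds match with a mismatched l2 value is rejected with -EINVAL.
 */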
static int
gf100_gr_zbc_color_get(struct gf100_gr_priv *priv, int format,
		       const u32 ds[4], const u32 l2[4])
{
	struct nvkm_ltc *ltc = nvkm_ltc(priv);
	int zbc = -ENOSPC, i;

	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
		if (priv->zbc_color[i].format) {
			if (priv->zbc_color[i].format != format)
				continue;
			if (memcmp(priv->zbc_color[i].ds, ds, sizeof(
				   priv->zbc_color[i].ds)))
				continue;
			if (memcmp(priv->zbc_color[i].l2, l2, sizeof(
				   priv->zbc_color[i].l2))) {
				WARN_ON(1);
				return -EINVAL;
			}
			return i;
		} else {
			zbc = (zbc < 0) ? i : zbc;
		}
	}

	if (zbc < 0)
		return zbc;

	memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds));
	memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2));
	priv->zbc_color[zbc].format = format;
	ltc->zbc_color_get(ltc, zbc, l2);
	gf100_gr_zbc_clear_color(priv, zbc);
	return zbc;
}

static void
gf100_gr_zbc_clear_depth(struct gf100_gr_priv *priv, int zbc)
{
	if (priv->zbc_depth[zbc].format)
		nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds);
	nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format);
	nv_wr32(priv, 0x405820, zbc);
	nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
}

static int
gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
		       const u32 ds, const u32 l2)
{
	struct nvkm_ltc *ltc = nvkm_ltc(priv);
	int zbc = -ENOSPC, i;

	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
		if (priv->zbc_depth[i].format) {
			if (priv->zbc_depth[i].format != format)
				continue;
			if (priv->zbc_depth[i].ds != ds)
				continue;
			if (priv->zbc_depth[i].l2 != l2) {
				WARN_ON(1);
				return -EINVAL;
			}
			return i;
		} else {
			zbc = (zbc < 0) ? i : zbc;
		}
	}

	if (zbc < 0)
		return zbc;

	priv->zbc_depth[zbc].format = format;
	priv->zbc_depth[zbc].ds = ds;
	priv->zbc_depth[zbc].l2 = l2;
	ltc->zbc_depth_get(ltc, zbc, l2);
	gf100_gr_zbc_clear_depth(priv, zbc);
	return zbc;
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

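/* Methods implemented on the FERMI_A graphics object, dispatched by
 * gf100_fermi_mthd() below.  FERMI_A_ZBC_COLOR and FERMI_A_ZBC_DEPTH
 * let a client register a ZBC clear value through the helpers above;
 * the colour variant returns the assigned table index in
 * args->v0.index on success.
 */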
static int
gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
{
	struct gf100_gr_priv *priv = (void *)object->engine;
	union {
		struct fermi_a_zbc_color_v0 v0;
	} *args = data;
	int ret;

	if (nvif_unpack(args->v0, 0, 0, false)) {
		switch (args->v0.format) {
		case FERMI_A_ZBC_COLOR_V0_FMT_ZERO:
		case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE:
		case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32:
		case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10:
		case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8:
		case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8:
		case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8:
		case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
		case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
			ret = gf100_gr_zbc_color_get(priv, args->v0.format,
						     args->v0.ds,
						     args->v0.l2);
			if (ret >= 0) {
				args->v0.index = ret;
				return 0;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	return ret;
}

static int
gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
{
	struct gf100_gr_priv *priv = (void *)object->engine;
	union {
		struct fermi_a_zbc_depth_v0 v0;
	} *args = data;
	int ret;

	if (nvif_unpack(args->v0, 0, 0, false)) {
		switch (args->v0.format) {
		case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
			ret = gf100_gr_zbc_depth_get(priv, args->v0.format,
						     args->v0.ds,
						     args->v0.l2);
			return (ret >= 0) ? 0 : -ENOSPC;
		default:
			return -EINVAL;
		}
	}

	return ret;
}

static int
gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	switch (mthd) {
	case FERMI_A_ZBC_COLOR:
		return gf100_fermi_mthd_zbc_color(object, data, size);
	case FERMI_A_ZBC_DEPTH:
		return gf100_fermi_mthd_zbc_depth(object, data, size);
	default:
		break;
	}
	return -EINVAL;
}

struct nvkm_ofuncs
gf100_fermi_ofuncs = {
	.ctor = _nvkm_object_ctor,
	.dtor = nvkm_object_destroy,
	.init = nvkm_object_init,
	.fini = nvkm_object_fini,
	.mthd = gf100_fermi_mthd,
};

static int
gf100_gr_set_shader_exceptions(struct nvkm_object *object, u32 mthd,
			       void *pdata, u32 size)
{
	struct gf100_gr_priv *priv = (void *)nv_engine(object);
	if (size >= sizeof(u32)) {
		u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
		nv_wr32(priv, 0x419e44, data);
		nv_wr32(priv, 0x419e4c, data);
		return 0;
	}
	return -EINVAL;
}

struct nvkm_omthds
gf100_gr_9097_omthds[] = {
	{ 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
	{}
};

struct nvkm_omthds
gf100_gr_90c0_omthds[] = {
	{ 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
	{}
};

struct nvkm_oclass
gf100_gr_sclass[] = {
	{ 0x902d, &nvkm_object_ofuncs },
	{ 0x9039, &nvkm_object_ofuncs },
	{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
	{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
	{}
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

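/* Per-channel PGRAPH context.  The constructor allocates the context
 * image itself (priv->size bytes, filled with the default values from
 * priv->data), the "mmio list" buffer and the data buffers the list
 * references, then writes the mmio list header; the header layout
 * differs between the built-in and external firmware paths.
 */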
int
gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		      struct nvkm_oclass *oclass, void *args, u32 size,
		      struct nvkm_object **pobject)
{
	struct nvkm_vm *vm = nvkm_client(parent)->vm;
	struct gf100_gr_priv *priv = (void *)engine;
	struct gf100_gr_data *data = priv->mmio_data;
	struct gf100_gr_mmio *mmio = priv->mmio_list;
	struct gf100_gr_chan *chan;
	int ret, i;

	/* allocate memory for context, and fill with default values */
	ret = nvkm_gr_context_create(parent, engine, oclass, NULL,
				     priv->size, 0x100,
				     NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* allocate memory for a "mmio list" buffer that's used by the HUB
	 * fuc to modify some per-context register settings on first load
	 * of the context.
	 */
	ret = nvkm_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
			      &chan->mmio);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
				 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
				 &chan->mmio_vma);
	if (ret)
		return ret;

	/* allocate buffers referenced by mmio list */
	for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
		ret = nvkm_gpuobj_new(nv_object(chan), NULL, data->size,
				      data->align, 0, &chan->data[i].mem);
		if (ret)
			return ret;

		ret = nvkm_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
					 &chan->data[i].vma);
		if (ret)
			return ret;

		data++;
	}

	/* finally, fill in the mmio list and point the context at it */
	for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
		u32 addr = mmio->addr;
		u32 data = mmio->data;

		if (mmio->buffer >= 0) {
			u64 info = chan->data[mmio->buffer].vma.offset;
			data |= info >> mmio->shift;
		}

		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
		mmio++;
	}

	for (i = 0; i < priv->size; i += 4)
		nv_wo32(chan, i, priv->data[i / 4]);

	if (!priv->firmware) {
		nv_wo32(chan, 0x00, chan->mmio_nr / 2);
		nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
	} else {
		nv_wo32(chan, 0xf4, 0);
		nv_wo32(chan, 0xf8, 0);
		nv_wo32(chan, 0x10, chan->mmio_nr / 2);
		nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
		nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
		nv_wo32(chan, 0x1c, 1);
		nv_wo32(chan, 0x20, 0);
		nv_wo32(chan, 0x28, 0);
		nv_wo32(chan, 0x2c, 0);
	}

	return 0;
}

void
gf100_gr_context_dtor(struct nvkm_object *object)
{
	struct gf100_gr_chan *chan = (void *)object;
	int i;

	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
		nvkm_gpuobj_unmap(&chan->data[i].vma);
		nvkm_gpuobj_ref(NULL, &chan->data[i].mem);
	}

	nvkm_gpuobj_unmap(&chan->mmio_vma);
	nvkm_gpuobj_ref(NULL, &chan->mmio);

	nvkm_gr_context_destroy(&chan->base);
}

/*******************************************************************************
 * PGRAPH register lists
 ******************************************************************************/

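/* Each gf100_gr_init entry is { addr, count, pitch, data }: the same
 * data value is written to <count> registers starting at <addr> and
 * spaced <pitch> bytes apart (see gf100_gr_mmio() below).  For example,
 * the gf100_gr_init_setup_0 entry { 0x418814, 3, 0x04, 0x00000000 }
 * expands to writes of 0x418814, 0x418818 and 0x41881c.  Most tables
 * are non-static, presumably so later chipset code can reuse them.
 */
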
const struct gf100_gr_init
gf100_gr_init_main_0[] = {
	{ 0x400080, 1, 0x04, 0x003083c2 },
	{ 0x400088, 1, 0x04, 0x00006fe7 },
	{ 0x40008c, 1, 0x04, 0x00000000 },
	{ 0x400090, 1, 0x04, 0x00000030 },
	{ 0x40013c, 1, 0x04, 0x013901f7 },
	{ 0x400140, 1, 0x04, 0x00000100 },
	{ 0x400144, 1, 0x04, 0x00000000 },
	{ 0x400148, 1, 0x04, 0x00000110 },
	{ 0x400138, 1, 0x04, 0x00000000 },
	{ 0x400130, 2, 0x04, 0x00000000 },
	{ 0x400124, 1, 0x04, 0x00000002 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_fe_0[] = {
	{ 0x40415c, 1, 0x04, 0x00000000 },
	{ 0x404170, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pri_0[] = {
	{ 0x404488, 2, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_rstr2d_0[] = {
	{ 0x407808, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pd_0[] = {
	{ 0x406024, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_ds_0[] = {
	{ 0x405844, 1, 0x04, 0x00ffffff },
	{ 0x405850, 1, 0x04, 0x00000000 },
	{ 0x405908, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_scc_0[] = {
	{ 0x40803c, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_prop_0[] = {
	{ 0x4184a0, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gpc_unk_0[] = {
	{ 0x418604, 1, 0x04, 0x00000000 },
	{ 0x418680, 1, 0x04, 0x00000000 },
	{ 0x418714, 1, 0x04, 0x80000000 },
	{ 0x418384, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_setup_0[] = {
	{ 0x418814, 3, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_crstr_0[] = {
	{ 0x418b04, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_setup_1[] = {
	{ 0x4188c8, 1, 0x04, 0x80000000 },
	{ 0x4188cc, 1, 0x04, 0x00000000 },
	{ 0x4188d0, 1, 0x04, 0x00010000 },
	{ 0x4188d4, 1, 0x04, 0x00000001 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_zcull_0[] = {
	{ 0x418910, 1, 0x04, 0x00010001 },
	{ 0x418914, 1, 0x04, 0x00000301 },
	{ 0x418918, 1, 0x04, 0x00800000 },
	{ 0x418980, 1, 0x04, 0x77777770 },
	{ 0x418984, 3, 0x04, 0x77777777 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gpm_0[] = {
	{ 0x418c04, 1, 0x04, 0x00000000 },
	{ 0x418c88, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gpc_unk_1[] = {
	{ 0x418d00, 1, 0x04, 0x00000000 },
	{ 0x418f08, 1, 0x04, 0x00000000 },
	{ 0x418e00, 1, 0x04, 0x00000050 },
	{ 0x418e08, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gcc_0[] = {
	{ 0x41900c, 1, 0x04, 0x00000000 },
	{ 0x419018, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_tpccs_0[] = {
	{ 0x419d08, 2, 0x04, 0x00000000 },
	{ 0x419d10, 1, 0x04, 0x00000014 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_tex_0[] = {
	{ 0x419ab0, 1, 0x04, 0x00000000 },
	{ 0x419ab8, 1, 0x04, 0x000000e7 },
	{ 0x419abc, 2, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pe_0[] = {
	{ 0x41980c, 3, 0x04, 0x00000000 },
	{ 0x419844, 1, 0x04, 0x00000000 },
	{ 0x41984c, 1, 0x04, 0x00005bc5 },
	{ 0x419850, 4, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_l1c_0[] = {
	{ 0x419c98, 1, 0x04, 0x00000000 },
	{ 0x419ca8, 1, 0x04, 0x80000000 },
	{ 0x419cb4, 1, 0x04, 0x00000000 },
	{ 0x419cb8, 1, 0x04, 0x00008bf4 },
	{ 0x419cbc, 1, 0x04, 0x28137606 },
	{ 0x419cc0, 2, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_wwdx_0[] = {
	{ 0x419bd4, 1, 0x04, 0x00800000 },
	{ 0x419bdc, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_tpccs_1[] = {
	{ 0x419d2c, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_mpc_0[] = {
	{ 0x419c0c, 1, 0x04, 0x00000000 },
	{}
};

static const struct gf100_gr_init
gf100_gr_init_sm_0[] = {
	{ 0x419e00, 1, 0x04, 0x00000000 },
	{ 0x419ea0, 1, 0x04, 0x00000000 },
	{ 0x419ea4, 1, 0x04, 0x00000100 },
	{ 0x419ea8, 1, 0x04, 0x00001100 },
	{ 0x419eac, 1, 0x04, 0x11100702 },
	{ 0x419eb0, 1, 0x04, 0x00000003 },
	{ 0x419eb4, 4, 0x04, 0x00000000 },
	{ 0x419ec8, 1, 0x04, 0x06060618 },
	{ 0x419ed0, 1, 0x04, 0x0eff0e38 },
	{ 0x419ed4, 1, 0x04, 0x011104f1 },
	{ 0x419edc, 1, 0x04, 0x00000000 },
	{ 0x419f00, 1, 0x04, 0x00000000 },
	{ 0x419f2c, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_be_0[] = {
	{ 0x40880c, 1, 0x04, 0x00000000 },
	{ 0x408910, 9, 0x04, 0x00000000 },
	{ 0x408950, 1, 0x04, 0x00000000 },
	{ 0x408954, 1, 0x04, 0x0000ffff },
	{ 0x408984, 1, 0x04, 0x00000000 },
	{ 0x408988, 1, 0x04, 0x08040201 },
	{ 0x40898c, 1, 0x04, 0x80402010 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_fe_1[] = {
	{ 0x4040f0, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pe_1[] = {
	{ 0x419880, 1, 0x04, 0x00000002 },
	{}
};

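/* Ordered list of the init tables above, applied by gf100_gr_init()
 * through gf100_gr_mmio() before the context-switch ucode is loaded.
 */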
static const struct gf100_gr_pack
gf100_gr_pack_mmio[] = {
	{ gf100_gr_init_main_0 },
	{ gf100_gr_init_fe_0 },
	{ gf100_gr_init_pri_0 },
	{ gf100_gr_init_rstr2d_0 },
	{ gf100_gr_init_pd_0 },
	{ gf100_gr_init_ds_0 },
	{ gf100_gr_init_scc_0 },
	{ gf100_gr_init_prop_0 },
	{ gf100_gr_init_gpc_unk_0 },
	{ gf100_gr_init_setup_0 },
	{ gf100_gr_init_crstr_0 },
	{ gf100_gr_init_setup_1 },
	{ gf100_gr_init_zcull_0 },
	{ gf100_gr_init_gpm_0 },
	{ gf100_gr_init_gpc_unk_1 },
	{ gf100_gr_init_gcc_0 },
	{ gf100_gr_init_tpccs_0 },
	{ gf100_gr_init_tex_0 },
	{ gf100_gr_init_pe_0 },
	{ gf100_gr_init_l1c_0 },
	{ gf100_gr_init_wwdx_0 },
	{ gf100_gr_init_tpccs_1 },
	{ gf100_gr_init_mpc_0 },
	{ gf100_gr_init_sm_0 },
	{ gf100_gr_init_be_0 },
	{ gf100_gr_init_fe_1 },
	{ gf100_gr_init_pe_1 },
	{}
};

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

void
gf100_gr_zbc_init(struct gf100_gr_priv *priv)
{
	const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
			     0x00000000, 0x00000000, 0x00000000, 0x00000000 };
	const u32 one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
			    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff };
	const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
			      0x00000000, 0x00000000, 0x00000000, 0x00000000 };
	const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
			      0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
	struct nvkm_ltc *ltc = nvkm_ltc(priv);
	int index;

	if (!priv->zbc_color[0].format) {
		gf100_gr_zbc_color_get(priv, 1, & zero[0], &zero[4]);
		gf100_gr_zbc_color_get(priv, 2, & one[0], &one[4]);
		gf100_gr_zbc_color_get(priv, 4, &f32_0[0], &f32_0[4]);
		gf100_gr_zbc_color_get(priv, 4, &f32_1[0], &f32_1[4]);
		gf100_gr_zbc_depth_get(priv, 1, 0x00000000, 0x00000000);
		gf100_gr_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000);
	}

	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
		gf100_gr_zbc_clear_color(priv, index);
	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
		gf100_gr_zbc_clear_depth(priv, index);
}

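/* Three ways of replaying a register/method list:
 *
 * gf100_gr_mmio() writes each entry's registers directly.
 * gf100_gr_icmd() streams address/data pairs through 0x400200/0x400204
 * with 0x400208 toggled around the whole transfer, waiting on 0x400700
 * after each address write and only rewriting the data register when
 * the value changes.
 * gf100_gr_mthd() pushes the entries through 0x404488/0x40448c, with
 * pack->type folded into the control word (presumably the object class
 * the methods belong to).
 */
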
void
gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
{
	const struct gf100_gr_pack *pack;
	const struct gf100_gr_init *init;

	pack_for_each_init(init, pack, p) {
		u32 next = init->addr + init->count * init->pitch;
		u32 addr = init->addr;
		while (addr < next) {
			nv_wr32(priv, addr, init->data);
			addr += init->pitch;
		}
	}
}

void
gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
{
	const struct gf100_gr_pack *pack;
	const struct gf100_gr_init *init;
	u32 data = 0;

	nv_wr32(priv, 0x400208, 0x80000000);

	pack_for_each_init(init, pack, p) {
		u32 next = init->addr + init->count * init->pitch;
		u32 addr = init->addr;

		if ((pack == p && init == p->init) || data != init->data) {
			nv_wr32(priv, 0x400204, init->data);
			data = init->data;
		}

		while (addr < next) {
			nv_wr32(priv, 0x400200, addr);
			nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
			addr += init->pitch;
		}
	}

	nv_wr32(priv, 0x400208, 0x00000000);
}

void
gf100_gr_mthd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
{
	const struct gf100_gr_pack *pack;
	const struct gf100_gr_init *init;
	u32 data = 0;

	pack_for_each_init(init, pack, p) {
		u32 ctrl = 0x80000000 | pack->type;
		u32 next = init->addr + init->count * init->pitch;
		u32 addr = init->addr;

		if ((pack == p && init == p->init) || data != init->data) {
			nv_wr32(priv, 0x40448c, init->data);
			data = init->data;
		}

		while (addr < next) {
			nv_wr32(priv, 0x404488, ctrl | (addr << 14));
			addr += init->pitch;
		}
	}
}

u64
gf100_gr_units(struct nvkm_gr *gr)
{
	struct gf100_gr_priv *priv = (void *)gr;
	u64 cfg;

	cfg = (u32)priv->gpc_nr;
	cfg |= (u32)priv->tpc_total << 8;
	cfg |= (u64)priv->rop_nr << 32;

	return cfg;
}

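/* Everything from here down to gf100_gr_intr() decodes, reports and
 * acknowledges PGRAPH error/trap state.  The nvkm_enum/nvkm_bitfield
 * tables translate raw status bits and error codes into the names that
 * end up in the kernel log.
 */
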
static const struct nvkm_enum gk104_sked_error[] = {
	{ 7, "CONSTANT_BUFFER_SIZE" },
	{ 9, "LOCAL_MEMORY_SIZE_POS" },
	{ 10, "LOCAL_MEMORY_SIZE_NEG" },
	{ 11, "WARP_CSTACK_SIZE" },
	{ 12, "TOTAL_TEMP_SIZE" },
	{ 13, "REGISTER_COUNT" },
	{ 18, "TOTAL_THREADS" },
	{ 20, "PROGRAM_OFFSET" },
	{ 21, "SHARED_MEMORY_SIZE" },
	{ 25, "SHARED_CONFIG_TOO_SMALL" },
	{ 26, "TOTAL_REGISTER_COUNT" },
	{}
};

static const struct nvkm_enum gf100_gpc_rop_error[] = {
	{ 1, "RT_PITCH_OVERRUN" },
	{ 4, "RT_WIDTH_OVERRUN" },
	{ 5, "RT_HEIGHT_OVERRUN" },
	{ 7, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 8, "RT_STORAGE_TYPE_MISMATCH" },
	{ 10, "RT_LINEAR_MISMATCH" },
	{}
};

static void
gf100_gr_trap_gpc_rop(struct gf100_gr_priv *priv, int gpc)
{
	u32 trap[4];
	int i;

	trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
	trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434));
	trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438));
	trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c));

	nv_error(priv, "GPC%d/PROP trap:", gpc);
	for (i = 0; i <= 29; ++i) {
		if (!(trap[0] & (1 << i)))
			continue;
		pr_cont(" ");
		nvkm_enum_print(gf100_gpc_rop_error, i);
	}
	pr_cont("\n");

	nv_error(priv, "x = %u, y = %u, format = %x, storage type = %x\n",
		 trap[1] & 0xffff, trap[1] >> 16, (trap[2] >> 8) & 0x3f,
		 trap[3] & 0xff);
	nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
}

static const struct nvkm_enum gf100_mp_warp_error[] = {
	{ 0x00, "NO_ERROR" },
	{ 0x01, "STACK_MISMATCH" },
	{ 0x05, "MISALIGNED_PC" },
	{ 0x08, "MISALIGNED_GPR" },
	{ 0x09, "INVALID_OPCODE" },
	{ 0x0d, "GPR_OUT_OF_BOUNDS" },
	{ 0x0e, "MEM_OUT_OF_BOUNDS" },
	{ 0x0f, "UNALIGNED_MEM_ACCESS" },
	{ 0x11, "INVALID_PARAM" },
	{}
};

static const struct nvkm_bitfield gf100_mp_global_error[] = {
	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
	{ 0x00000008, "OUT_OF_STACK_SPACE" },
	{}
};

static void
gf100_gr_trap_mp(struct gf100_gr_priv *priv, int gpc, int tpc)
{
	u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x648));
	u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x650));

	nv_error(priv, "GPC%i/TPC%i/MP trap:", gpc, tpc);
	nvkm_bitfield_print(gf100_mp_global_error, gerr);
	if (werr) {
		pr_cont(" ");
		nvkm_enum_print(gf100_mp_warp_error, werr & 0xffff);
	}
	pr_cont("\n");

	nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
	nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x650), gerr);
}

static void
gf100_gr_trap_tpc(struct gf100_gr_priv *priv, int gpc, int tpc)
{
	u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508));

	if (stat & 0x00000001) {
		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224));
		nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000002) {
		gf100_gr_trap_mp(priv, gpc, tpc);
		stat &= ~0x00000002;
	}

	if (stat & 0x00000004) {
		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084));
		nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
		stat &= ~0x00000004;
	}

	if (stat & 0x00000008) {
		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c));
		nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
		stat &= ~0x00000008;
	}

	if (stat) {
		nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat);
	}
}

static void
gf100_gr_trap_gpc(struct gf100_gr_priv *priv, int gpc)
{
	u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
	int tpc;

	if (stat & 0x00000001) {
		gf100_gr_trap_gpc_rop(priv, gpc);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000002) {
		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
		nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		stat &= ~0x00000002;
	}

	if (stat & 0x00000004) {
		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
		nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap);
		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		stat &= ~0x00000004;
	}

	if (stat & 0x00000008) {
		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
		nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		stat &= ~0x00000009;
	}

	for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
		u32 mask = 0x00010000 << tpc;
		if (stat & mask) {
			gf100_gr_trap_tpc(priv, gpc, tpc);
			nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask);
			stat &= ~mask;
		}
	}

	if (stat) {
		nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat);
	}
}

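/* Top-level trap handler.  Each bit of 0x400108 identifies a source
 * (0x01 DISPATCH, 0x02 M2MF, 0x08 CCACHE, 0x10 SHADER, 0x40 UNK6,
 * 0x80 MACRO, 0x100 SKED, 0x01000000 per-GPC via 0x400118,
 * 0x02000000 ROPs); each handled source is reported, reset and acked
 * in 0x400108, and anything left over is logged as unhandled.
 */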
static void
gf100_gr_trap_intr(struct gf100_gr_priv *priv)
{
	u32 trap = nv_rd32(priv, 0x400108);
	int rop, gpc, i;

	if (trap & 0x00000001) {
		u32 stat = nv_rd32(priv, 0x404000);
		nv_error(priv, "DISPATCH 0x%08x\n", stat);
		nv_wr32(priv, 0x404000, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000001);
		trap &= ~0x00000001;
	}

	if (trap & 0x00000002) {
		u32 stat = nv_rd32(priv, 0x404600);
		nv_error(priv, "M2MF 0x%08x\n", stat);
		nv_wr32(priv, 0x404600, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000002);
		trap &= ~0x00000002;
	}

	if (trap & 0x00000008) {
		u32 stat = nv_rd32(priv, 0x408030);
		nv_error(priv, "CCACHE 0x%08x\n", stat);
		nv_wr32(priv, 0x408030, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000008);
		trap &= ~0x00000008;
	}

	if (trap & 0x00000010) {
		u32 stat = nv_rd32(priv, 0x405840);
		nv_error(priv, "SHADER 0x%08x\n", stat);
		nv_wr32(priv, 0x405840, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000010);
		trap &= ~0x00000010;
	}

	if (trap & 0x00000040) {
		u32 stat = nv_rd32(priv, 0x40601c);
		nv_error(priv, "UNK6 0x%08x\n", stat);
		nv_wr32(priv, 0x40601c, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000040);
		trap &= ~0x00000040;
	}

	if (trap & 0x00000080) {
		u32 stat = nv_rd32(priv, 0x404490);
		nv_error(priv, "MACRO 0x%08x\n", stat);
		nv_wr32(priv, 0x404490, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000080);
		trap &= ~0x00000080;
	}

	if (trap & 0x00000100) {
		u32 stat = nv_rd32(priv, 0x407020);

		nv_error(priv, "SKED:");
		for (i = 0; i <= 29; ++i) {
			if (!(stat & (1 << i)))
				continue;
			pr_cont(" ");
			nvkm_enum_print(gk104_sked_error, i);
		}
		pr_cont("\n");

		if (stat & 0x3fffffff)
			nv_wr32(priv, 0x407020, 0x40000000);
		nv_wr32(priv, 0x400108, 0x00000100);
		trap &= ~0x00000100;
	}

	if (trap & 0x01000000) {
		u32 stat = nv_rd32(priv, 0x400118);
		for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) {
			u32 mask = 0x00000001 << gpc;
			if (stat & mask) {
				gf100_gr_trap_gpc(priv, gpc);
				nv_wr32(priv, 0x400118, mask);
				stat &= ~mask;
			}
		}
		nv_wr32(priv, 0x400108, 0x01000000);
		trap &= ~0x01000000;
	}

	if (trap & 0x02000000) {
		for (rop = 0; rop < priv->rop_nr; rop++) {
			u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
			u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
			nv_error(priv, "ROP%d 0x%08x 0x%08x\n",
				 rop, statz, statc);
			nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
			nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
		}
		nv_wr32(priv, 0x400108, 0x02000000);
		trap &= ~0x02000000;
	}

	if (trap) {
		nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap);
		nv_wr32(priv, 0x400108, trap);
	}
}

static void
gf100_gr_ctxctl_debug_unit(struct gf100_gr_priv *priv, u32 base)
{
	nv_error(priv, "%06x - done 0x%08x\n", base,
		 nv_rd32(priv, base + 0x400));
	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		 nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
		 nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		 nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
		 nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
}

void
gf100_gr_ctxctl_debug(struct gf100_gr_priv *priv)
{
	u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
	u32 gpc;

	gf100_gr_ctxctl_debug_unit(priv, 0x409000);
	for (gpc = 0; gpc < gpcnr; gpc++)
		gf100_gr_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
}

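/* FECS (the context-switching falcon) interrupt handling.  0x409c18
 * holds the firmware's interrupt status: bit 0 carries ucode-reported
 * errors (E_BAD_FWMTHD is decoded into subchannel/class/method/data),
 * bit 19 is the watchdog, and anything else gets the full ctxctl debug
 * dump above before being acked via 0x409c20.
 */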
static void
gf100_gr_ctxctl_isr(struct gf100_gr_priv *priv)
{
	u32 stat = nv_rd32(priv, 0x409c18);

	if (stat & 0x00000001) {
		u32 code = nv_rd32(priv, 0x409814);
		if (code == E_BAD_FWMTHD) {
			u32 class = nv_rd32(priv, 0x409808);
			u32 addr = nv_rd32(priv, 0x40980c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00003ffc);
			u32 data = nv_rd32(priv, 0x409810);

			nv_error(priv, "FECS MTHD subc %d class 0x%04x "
				       "mthd 0x%04x data 0x%08x\n",
				 subc, class, mthd, data);

			nv_wr32(priv, 0x409c20, 0x00000001);
			stat &= ~0x00000001;
		} else {
			nv_error(priv, "FECS ucode error %d\n", code);
		}
	}

	if (stat & 0x00080000) {
		nv_error(priv, "FECS watchdog timeout\n");
		gf100_gr_ctxctl_debug(priv);
		nv_wr32(priv, 0x409c20, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		nv_error(priv, "FECS 0x%08x\n", stat);
		gf100_gr_ctxctl_debug(priv);
		nv_wr32(priv, 0x409c20, stat);
	}
}

static void
gf100_gr_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
	struct nvkm_engine *engine = nv_engine(subdev);
	struct nvkm_object *engctx;
	struct nvkm_handle *handle;
	struct gf100_gr_priv *priv = (void *)subdev;
	u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
	u32 stat = nv_rd32(priv, 0x400100);
	u32 addr = nv_rd32(priv, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nv_rd32(priv, 0x400708);
	u32 code = nv_rd32(priv, 0x400110);
	u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
	int chid;

	engctx = nvkm_engctx_get(engine, inst);
	chid = pfifo->chid(pfifo, engctx);

	if (stat & 0x00000010) {
		handle = nvkm_handle_get_class(engctx, class);
		if (!handle || nv_call(handle->object, mthd, data)) {
			nv_error(priv,
				 "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
				 chid, inst << 12, nvkm_client_name(engctx),
				 subc, class, mthd, data);
		}
		nvkm_handle_put(handle);
		nv_wr32(priv, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000020) {
		nv_error(priv,
			 "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
			 chid, inst << 12, nvkm_client_name(engctx), subc,
			 class, mthd, data);
		nv_wr32(priv, 0x400100, 0x00000020);
		stat &= ~0x00000020;
	}

	if (stat & 0x00100000) {
		nv_error(priv, "DATA_ERROR [");
		nvkm_enum_print(nv50_data_error_names, code);
		pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
			chid, inst << 12, nvkm_client_name(engctx), subc,
			class, mthd, data);
		nv_wr32(priv, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
			 nvkm_client_name(engctx));
		gf100_gr_trap_intr(priv);
		nv_wr32(priv, 0x400100, 0x00200000);
		stat &= ~0x00200000;
	}

	if (stat & 0x00080000) {
		gf100_gr_ctxctl_isr(priv);
		nv_wr32(priv, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		nv_error(priv, "unknown stat 0x%08x\n", stat);
		nv_wr32(priv, 0x400100, stat);
	}

	nv_wr32(priv, 0x400500, 0x00010001);
	nvkm_engctx_put(engctx);
}

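/* Upload an external falcon code/data image to the falcon at fuc_base.
 * The data segment is streamed through +0x01c4 after priming +0x01c0,
 * the code segment through +0x0184 after priming +0x0180, with +0x0188
 * selecting a new 0x40-word page every 64 words; the code upload is
 * zero-padded out to a whole page.
 */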
void
gf100_gr_init_fw(struct gf100_gr_priv *priv, u32 fuc_base,
		 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
	int i;

	nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
	for (i = 0; i < data->size / 4; i++)
		nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);

	nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
	for (i = 0; i < code->size / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(priv, fuc_base + 0x0188, i >> 6);
		nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
	}

	/* code must be padded to 0x40 words */
	for (; i & 0x3f; i++)
		nv_wr32(priv, fuc_base + 0x0184, 0);
}

static void
gf100_gr_init_csdata(struct gf100_gr_priv *priv,
		     const struct gf100_gr_pack *pack,
		     u32 falcon, u32 starstar, u32 base)
{
	const struct gf100_gr_pack *iter;
	const struct gf100_gr_init *init;
	u32 addr = ~0, prev = ~0, xfer = 0;
	u32 star, temp;

	nv_wr32(priv, falcon + 0x01c0, 0x02000000 + starstar);
	star = nv_rd32(priv, falcon + 0x01c4);
	temp = nv_rd32(priv, falcon + 0x01c4);
	if (temp > star)
		star = temp;
	nv_wr32(priv, falcon + 0x01c0, 0x01000000 + star);

	pack_for_each_init(init, iter, pack) {
		u32 head = init->addr - base;
		u32 tail = head + init->count * init->pitch;
		while (head < tail) {
			if (head != prev + 4 || xfer >= 32) {
				if (xfer) {
					u32 data = ((--xfer << 26) | addr);
					nv_wr32(priv, falcon + 0x01c4, data);
					star += 4;
				}
				addr = head;
				xfer = 0;
			}
			prev = head;
			xfer = xfer + 1;
			head = head + init->pitch;
		}
	}

	nv_wr32(priv, falcon + 0x01c4, (--xfer << 26) | addr);
	nv_wr32(priv, falcon + 0x01c0, 0x01000004 + starstar);
	nv_wr32(priv, falcon + 0x01c4, star + 4);
}

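/* Bring up the context-switch microcode.  With external firmware
 * (priv->firmware) the fuc409c/d and fuc41ac/d images are uploaded and
 * FECS is driven through a series of requests via 0x409500/0x409504
 * (0x10 reports the context image size back in 0x409800; 0x16, 0x25
 * and, on chipsets >= 0xe0, 0x30..0x32 perform further setup).  With
 * the built-in ucode, the HUB and GPC images from the oclass are
 * uploaded, the context-switch register lists are handed over through
 * gf100_gr_init_csdata(), and HUB init is waited on.  In both cases a
 * context template is generated with gf100_grctx_generate() if none
 * exists yet.
 */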
int
gf100_gr_init_ctxctl(struct gf100_gr_priv *priv)
{
	struct gf100_gr_oclass *oclass = (void *)nv_object(priv)->oclass;
	struct gf100_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
	int i;

	if (priv->firmware) {
		/* load fuc microcode */
		nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
		gf100_gr_init_fw(priv, 0x409000, &priv->fuc409c,
				 &priv->fuc409d);
		gf100_gr_init_fw(priv, 0x41a000, &priv->fuc41ac,
				 &priv->fuc41ad);
		nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);

		/* start both of them running */
		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x41a10c, 0x00000000);
		nv_wr32(priv, 0x40910c, 0x00000000);
		nv_wr32(priv, 0x41a100, 0x00000002);
		nv_wr32(priv, 0x409100, 0x00000002);
		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
			nv_warn(priv, "0x409800 wait failed\n");

		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x7fffffff);
		nv_wr32(priv, 0x409504, 0x00000021);

		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x00000000);
		nv_wr32(priv, 0x409504, 0x00000010);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x10 timeout\n");
			return -EBUSY;
		}
		priv->size = nv_rd32(priv, 0x409800);

		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x00000000);
		nv_wr32(priv, 0x409504, 0x00000016);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x16 timeout\n");
			return -EBUSY;
		}

		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x00000000);
		nv_wr32(priv, 0x409504, 0x00000025);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x25 timeout\n");
			return -EBUSY;
		}

		if (nv_device(priv)->chipset >= 0xe0) {
			nv_wr32(priv, 0x409800, 0x00000000);
			nv_wr32(priv, 0x409500, 0x00000001);
			nv_wr32(priv, 0x409504, 0x00000030);
			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
				nv_error(priv, "fuc09 req 0x30 timeout\n");
				return -EBUSY;
			}

			nv_wr32(priv, 0x409810, 0xb00095c8);
			nv_wr32(priv, 0x409800, 0x00000000);
			nv_wr32(priv, 0x409500, 0x00000001);
			nv_wr32(priv, 0x409504, 0x00000031);
			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
				nv_error(priv, "fuc09 req 0x31 timeout\n");
				return -EBUSY;
			}

			nv_wr32(priv, 0x409810, 0x00080420);
			nv_wr32(priv, 0x409800, 0x00000000);
			nv_wr32(priv, 0x409500, 0x00000001);
			nv_wr32(priv, 0x409504, 0x00000032);
			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
				nv_error(priv, "fuc09 req 0x32 timeout\n");
				return -EBUSY;
			}

			nv_wr32(priv, 0x409614, 0x00000070);
			nv_wr32(priv, 0x409614, 0x00000770);
			nv_wr32(priv, 0x40802c, 0x00000001);
		}

		if (priv->data == NULL) {
			int ret = gf100_grctx_generate(priv);
			if (ret) {
				nv_error(priv, "failed to construct context\n");
				return ret;
			}
		}

		return 0;
	} else
	if (!oclass->fecs.ucode) {
		return -ENOSYS;
	}

	/* load HUB microcode */
	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
	nv_wr32(priv, 0x4091c0, 0x01000000);
	for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++)
		nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]);

	nv_wr32(priv, 0x409180, 0x01000000);
	for (i = 0; i < oclass->fecs.ucode->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(priv, 0x409188, i >> 6);
		nv_wr32(priv, 0x409184, oclass->fecs.ucode->code.data[i]);
	}

	/* load GPC microcode */
	nv_wr32(priv, 0x41a1c0, 0x01000000);
	for (i = 0; i < oclass->gpccs.ucode->data.size / 4; i++)
		nv_wr32(priv, 0x41a1c4, oclass->gpccs.ucode->data.data[i]);

	nv_wr32(priv, 0x41a180, 0x01000000);
	for (i = 0; i < oclass->gpccs.ucode->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(priv, 0x41a188, i >> 6);
		nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]);
	}
	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);

	/* load register lists */
	gf100_gr_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
	gf100_gr_init_csdata(priv, cclass->gpc, 0x41a000, 0x000, 0x418000);
	gf100_gr_init_csdata(priv, cclass->tpc, 0x41a000, 0x004, 0x419800);
	gf100_gr_init_csdata(priv, cclass->ppc, 0x41a000, 0x008, 0x41be00);

	/* start HUB ucode running, it'll init the GPCs */
	nv_wr32(priv, 0x40910c, 0x00000000);
	nv_wr32(priv, 0x409100, 0x00000002);
	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
		nv_error(priv, "HUB_INIT timed out\n");
		gf100_gr_ctxctl_debug(priv);
		return -EBUSY;
	}

	priv->size = nv_rd32(priv, 0x409804);
	if (priv->data == NULL) {
		int ret = gf100_grctx_generate(priv);
		if (ret) {
			nv_error(priv, "failed to construct context\n");
			return ret;
		}
	}

	return 0;
}

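/* PGRAPH init: apply the mmio register lists, distribute TPCs across
 * GPCs (each TPC's index within its GPC is packed, in round-robin GPC
 * order, into a nibble of GPC_BCAST 0x0980..0x098c), program
 * magicgpc918 = DIV_ROUND_UP(0x00800000, tpc_total), unmask and ack
 * the interrupt/trap sources handled above, seed the default ZBC
 * entries and finally hand over to gf100_gr_init_ctxctl().
 */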
int
gf100_gr_init(struct nvkm_object *object)
{
	struct gf100_gr_oclass *oclass = (void *)object->oclass;
	struct gf100_gr_priv *priv = (void *)object;
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
	u32 data[TPC_MAX / 8] = {};
	u8 tpcnr[GPC_MAX];
	int gpc, tpc, rop;
	int ret, i;

	ret = nvkm_gr_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);

	gf100_gr_mmio(priv, oclass->mmio);

	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % priv->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
			priv->tpc_total);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	if (nv_device(priv)->chipset != 0xd7)
		nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
	else
		nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);

	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));

	nv_wr32(priv, 0x400500, 0x00010001);

	nv_wr32(priv, 0x400100, 0xffffffff);
	nv_wr32(priv, 0x40013c, 0xffffffff);

	nv_wr32(priv, 0x409c24, 0x000f0000);
	nv_wr32(priv, 0x404000, 0xc0000000);
	nv_wr32(priv, 0x404600, 0xc0000000);
	nv_wr32(priv, 0x408030, 0xc0000000);
	nv_wr32(priv, 0x40601c, 0xc0000000);
	nv_wr32(priv, 0x404490, 0xc0000000);
	nv_wr32(priv, 0x406018, 0xc0000000);
	nv_wr32(priv, 0x405840, 0xc0000000);
	nv_wr32(priv, 0x405844, 0x00ffffff);
	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
		}
		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}

	for (rop = 0; rop < priv->rop_nr; rop++) {
		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
	}

	nv_wr32(priv, 0x400108, 0xffffffff);
	nv_wr32(priv, 0x400138, 0xffffffff);
	nv_wr32(priv, 0x400118, 0xffffffff);
	nv_wr32(priv, 0x400130, 0xffffffff);
	nv_wr32(priv, 0x40011c, 0xffffffff);
	nv_wr32(priv, 0x400134, 0xffffffff);

	nv_wr32(priv, 0x400054, 0x34ce3464);

	gf100_gr_zbc_init(priv);

	return gf100_gr_init_ctxctl(priv);
}

static void
gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
{
	kfree(fuc->data);
	fuc->data = NULL;
}

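/* Fetch one external firmware image, trying the chipset-specific
 * "nouveau/nv%02x_<name>" path first and falling back to
 * "nouveau/<name>".  The blob is duplicated with kmemdup() so it can
 * outlive release_firmware().
 */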
int
gf100_gr_ctor_fw(struct gf100_gr_priv *priv, const char *fwname,
		 struct gf100_gr_fuc *fuc)
{
	struct nvkm_device *device = nv_device(priv);
	const struct firmware *fw;
	char f[32];
	int ret;

	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
	ret = request_firmware(&fw, f, nv_device_base(device));
	if (ret) {
		snprintf(f, sizeof(f), "nouveau/%s", fwname);
		ret = request_firmware(&fw, f, nv_device_base(device));
		if (ret) {
			nv_error(priv, "failed to load %s\n", fwname);
			return ret;
		}
	}

	fuc->size = fw->size;
	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
	release_firmware(fw);
	return (fuc->data != NULL) ? 0 : -ENOMEM;
}

void
gf100_gr_dtor(struct nvkm_object *object)
{
	struct gf100_gr_priv *priv = (void *)object;

	kfree(priv->data);

	gf100_gr_dtor_fw(&priv->fuc409c);
	gf100_gr_dtor_fw(&priv->fuc409d);
	gf100_gr_dtor_fw(&priv->fuc41ac);
	gf100_gr_dtor_fw(&priv->fuc41ad);

	nvkm_gpuobj_ref(NULL, &priv->unk4188b8);
	nvkm_gpuobj_ref(NULL, &priv->unk4188b4);

	nvkm_gr_destroy(&priv->base);
}

int
gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *bclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct gf100_gr_oclass *oclass = (void *)bclass;
	struct nvkm_device *device = nv_device(parent);
	struct gf100_gr_priv *priv;
	bool use_ext_fw, enable;
	int ret, i, j;

	use_ext_fw = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
				  oclass->fecs.ucode == NULL);
	enable = use_ext_fw || oclass->fecs.ucode != NULL;

	ret = nvkm_gr_create(parent, engine, bclass, enable, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x08001000;
	nv_subdev(priv)->intr = gf100_gr_intr;

	priv->base.units = gf100_gr_units;

	if (use_ext_fw) {
		nv_info(priv, "using external firmware\n");
		if (gf100_gr_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
		    gf100_gr_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
		    gf100_gr_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
		    gf100_gr_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
			return -ENODEV;
		priv->firmware = true;
	}

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
			      &priv->unk4188b4);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
			      &priv->unk4188b8);
	if (ret)
		return ret;

	for (i = 0; i < 0x1000; i += 4) {
		nv_wo32(priv->unk4188b4, i, 0x00000010);
		nv_wo32(priv->unk4188b8, i, 0x00000010);
	}

	priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
	priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f;
	for (i = 0; i < priv->gpc_nr; i++) {
		priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
		priv->tpc_total += priv->tpc_nr[i];
		priv->ppc_nr[i] = oclass->ppc_nr;
		for (j = 0; j < priv->ppc_nr[i]; j++) {
			u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4)));
			priv->ppc_tpc_nr[i][j] = hweight8(mask);
		}
	}

	/*XXX: these need figuring out... though it might not even matter */
	switch (nv_device(priv)->chipset) {
	case 0xc0:
		if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
			priv->magic_not_rop_nr = 0x07;
		} else
		if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
			priv->magic_not_rop_nr = 0x05;
		} else
		if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
			priv->magic_not_rop_nr = 0x06;
		}
		break;
	case 0xc3: /* 450, 4/0/0/0, 2 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xc4: /* 460, 3/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x01;
		break;
	case 0xc1: /* 2/0/0/0, 1 */
		priv->magic_not_rop_nr = 0x01;
		break;
	case 0xc8: /* 4/4/3/4, 5 */
		priv->magic_not_rop_nr = 0x06;
		break;
	case 0xce: /* 4/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xcf: /* 4/0/0/0, 3 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xd7:
	case 0xd9: /* 1/0/0/0, 1 */
		priv->magic_not_rop_nr = 0x01;
		break;
	}

	nv_engine(priv)->cclass = *oclass->cclass;
	nv_engine(priv)->sclass = oclass->sclass;
	return 0;
}

#include "fuc/hubgf100.fuc3.h"

struct gf100_gr_ucode
gf100_gr_fecs_ucode = {
	.code.data = gf100_grhub_code,
	.code.size = sizeof(gf100_grhub_code),
	.data.data = gf100_grhub_data,
	.data.size = sizeof(gf100_grhub_data),
};

#include "fuc/gpcgf100.fuc3.h"

struct gf100_gr_ucode
gf100_gr_gpccs_ucode = {
	.code.data = gf100_grgpc_code,
	.code.size = sizeof(gf100_grgpc_code),
	.data.data = gf100_grgpc_data,
	.data.size = sizeof(gf100_grgpc_data),
};

struct nvkm_oclass *
gf100_gr_oclass = &(struct gf100_gr_oclass) {
	.base.handle = NV_ENGINE(GR, 0xc0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_gr_ctor,
		.dtor = gf100_gr_dtor,
		.init = gf100_gr_init,
		.fini = _nvkm_gr_fini,
	},
	.cclass = &gf100_grctx_oclass,
	.sclass = gf100_gr_sclass,
	.mmio = gf100_gr_pack_mmio,
	.fecs.ucode = &gf100_gr_fecs_ucode,
	.gpccs.ucode = &gf100_gr_gpccs_ucode,
}.base;