/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;
	u64 mask;
} fifo_engine[] = {
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
				 (1ULL << NVDEV_ENGINE_CE2)),
	_(NVDEV_ENGINE_MSPDEC  , 0),
	_(NVDEV_ENGINE_MSPPP   , 0),
	_(NVDEV_ENGINE_MSVLD   , 0),
	_(NVDEV_ENGINE_CE0     , 0),
	_(NVDEV_ENGINE_CE1     , 0),
	_(NVDEV_ENGINE_MSENC   , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)

struct gk104_fifo_engn {
	struct nvkm_gpuobj *runlist[2];
	int cur_runlist;
	wait_queue_head_t wait;
};

struct gk104_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;
	u64 mask;

	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
	struct {
		struct nvkm_gpuobj *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

struct gk104_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 engine;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

static void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct nvkm_bar *bar = nvkm_bar(fifo);
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	for (i = 0, p = 0; i < fifo->base.max; i++) {
		struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING && chan->engine == engine) {
			nv_wo32(cur, p + 0, i);
			nv_wo32(cur, p + 4, 0x00000000);
			p += 8;
		}
	}
	bar->flush(bar);

	nv_wr32(fifo, 0x002270, cur->addr >> 12);
	nv_wr32(fifo, 0x002274, (engine << 20) | (p >> 3));

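	/*
	 * Wait for the hardware to finish processing the new runlist:
	 * bit 20 of the per-engine status register (0x002284 + engine * 8)
	 * presumably indicates a pending runlist commit, and the RUNLIST
	 * interrupt handler (gk104_fifo_intr_runlist) wakes engn->wait so
	 * the condition can be re-checked.
	 */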
	if (wait_event_timeout(engn->wait, !(nv_rd32(fifo, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nv_error(fifo, "runlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

static int
gk104_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   :
		return 0;
	case NVDEV_ENGINE_CE0:
	case NVDEV_ENGINE_CE1:
	case NVDEV_ENGINE_CE2:
		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
		return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	return 0;
}

static int
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
{
	struct nvkm_object *obj = (void *)chan;
	struct gk104_fifo *fifo = (void *)obj->engine;

	nv_wr32(fifo, 0x002634, chan->base.chid);
	if (!nv_wait(fifo, 0x002634, 0x100000, 0x000000)) {
		nv_error(fifo, "channel %d [%s] kick timeout\n",
			 chan->base.chid, nvkm_client_name(chan));
		return -EBUSY;
	}

	return 0;
}

static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct gk104_fifo_chan *chan = (void *)parent;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_CE0   :
	case NVDEV_ENGINE_CE1   :
	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	ret = gk104_fifo_chan_kick(chan);
	if (ret && suspend)
		return ret;

	if (addr) {
		nv_wo32(base, addr + 0x00, 0x00000000);
		nv_wo32(base, addr + 0x04, 0x00000000);
		bar->flush(bar);
	}

	return 0;
}

static int
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo *fifo = (void *)engine;
	struct gk104_fifo_base *base = (void *)parent;
	struct gk104_fifo_chan *chan;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
				 "ioffset %016llx ilength %08x engine %08x\n",
			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			 args->v0.ilength, args->v0.engine);
	} else
		return ret;

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		if (args->v0.engine & (1 << i)) {
			if (nvkm_engine(parent, fifo_engine[i].subdev)) {
				args->v0.engine = (1 << i);
				break;
			}
		}
	}

	if (i == FIFO_ENGINE_NR) {
		nv_error(fifo, "unsupported engines 0x%08x\n", args->v0.engine);
		return -ENODEV;
	}

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x200,
				       args->v0.pushbuf,
				       fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
	chan->engine = i;

	usermem = chan->base.chid * 0x200;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	for (i = 0; i < 0x200; i += 4)
		nv_wo32(fifo->user.mem, usermem + i, 0x00000000);

	nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
	nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
	nv_wo32(base, 0x10, 0x0000face);
	nv_wo32(base, 0x30, 0xfffff902);
	nv_wo32(base, 0x48, lower_32_bits(ioffset));
	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base, 0x84, 0x20400000);
	nv_wo32(base, 0x94, 0x30000001);
	nv_wo32(base, 0x9c, 0x00000100);
	nv_wo32(base, 0xac, 0x0000001f);
	nv_wo32(base, 0xe8, chan->base.chid);
	nv_wo32(base, 0xb8, 0xf8000000);
	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	return 0;
}

static int
gk104_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nv_mask(fifo, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nv_wr32(fifo, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nv_mask(fifo, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->engine);
		nv_mask(fifo, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	}

	return 0;
}

static int
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;

	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nv_mask(fifo, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
		gk104_fifo_runlist_update(fifo, chan->engine);
	}

	nv_wr32(fifo, 0x800000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

struct nvkm_ofuncs
gk104_fifo_chan_ofuncs = {
	.ctor = gk104_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gk104_fifo_chan_init,
	.fini = gk104_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

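/* Channel object classes userspace may instantiate on this FIFO. */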
static struct nvkm_oclass
gk104_fifo_sclass[] = {
	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

static int
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gk104_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0208, 0xffffffff);
	nv_wo32(base, 0x020c, 0x000000ff);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
gk104_fifo_context_dtor(struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

static struct nvkm_oclass
gk104_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_context_ctor,
		.dtor = gk104_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

static inline int
gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    :
	case NVDEV_ENGINE_CE2   : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	case NVDEV_ENGINE_MSENC : engn = 6; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{
	if (engn >= ARRAY_SIZE(fifo_engine))
		return NULL;
	return nvkm_engine(fifo, fifo_engine[engn].subdev);
}

static void
gk104_fifo_recover_work(struct work_struct *work)
{
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_object *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
		engm |= 1 << gk104_fifo_engidx(fifo, engn);
	nv_mask(fifo, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
		if ((engine = (void *)nvkm_engine(fifo, engn))) {
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
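		/*
		 * Rebuild and resubmit this engine's runlist; the channel
		 * marked KILLED in gk104_fifo_recover() is no longer RUNNING
		 * and is therefore dropped from the new runlist.
		 */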
		gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
	}

	nv_wr32(fifo, 0x00262c, engm);
	nv_mask(fifo, 0x002630, engm, 0x00000000);
}

static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		   struct gk104_fifo_chan *chan)
{
	u32 chid = chan->base.chid;
	unsigned long flags;

	nv_error(fifo, "%s engine fault on channel %d, recovering...\n",
		 nv_subdev(engine)->name, chid);

	nv_mask(fifo, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

static int
gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gk104_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	u32 intr = nv_rd32(fifo, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;
	char enunk[6] = "";

	en = nvkm_enum_find(gk104_fifo_bind_reason, code);
	if (!en)
		snprintf(enunk, sizeof(enunk), "UNK%02x", code);

	nv_error(fifo, "BIND_ERROR [ %s ]\n", en ? en->name : enunk);
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
		u32 stat = nv_rd32(fifo, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;
		(void)save;

		if (busy && chsw) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gk104_fifo_engine(fifo, engn)))
				continue;
			gk104_fifo_recover(fifo, engine, chan);
		}
	}
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	u32 intr = nv_rd32(fifo, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;
	char enunk[6] = "";

	en = nvkm_enum_find(gk104_fifo_sched_reason, code);
	if (!en)
		snprintf(enunk, sizeof(enunk), "UNK%02x", code);

	nv_error(fifo, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	u32 stat = nv_rd32(fifo, 0x00256c);
	nv_error(fifo, "CHSW_ERROR 0x%08x\n", stat);
	nv_wr32(fifo, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	u32 stat = nv_rd32(fifo, 0x00259c);
	nv_error(fifo, "DROPPED_MMU_FAULT 0x%08x\n", stat);
}

static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	u32 inst = nv_rd32(fifo, 0x002800 + (unit * 0x10));
	u32 valo = nv_rd32(fifo, 0x002804 + (unit * 0x10));
	u32 vahi = nv_rd32(fifo, 0x002808 + (unit * 0x10));
	u32 stat = nv_rd32(fifo, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char erunk[6] = "";
	char euunk[6] = "";
	char ecunk[6] = "";
	char gpcid[3] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	if (!er)
		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);

	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nv_mask(fifo, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nv_mask(fifo, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nv_mask(fifo, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	} else {
		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
	}

	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
	}

	if (!ec)
		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);

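	/*
	 * Report the decoded fault, then walk the engine context's parents
	 * looking for a GPFIFO channel to hand to gk104_fifo_recover().
	 */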
	nv_error(fifo, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
		       "channel 0x%010llx [%s]\n", write ? "write" : "read",
		 (u64)vahi << 32 | valo, er ? er->name : erunk,
		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
		 ec ? ec->name : ecunk, (u64)inst << 12,
		 nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			gk104_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	u32 mask = nv_rd32(fifo, 0x04010c + (unit * 0x2000));
	u32 stat = nv_rd32(fifo, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nv_rd32(fifo, 0x0400c0 + (unit * 0x2000));
	u32 data = nv_rd32(fifo, 0x0400c4 + (unit * 0x2000));
	u32 chid = nv_rd32(fifo, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;

	if (stat & 0x00800000) {
		if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
		nv_wr32(fifo, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nv_error(fifo, "PBDMA%d:", unit);
		nvkm_bitfield_print(gk104_fifo_pbdma_intr_0, show);
		pr_cont("\n");
		nv_error(fifo,
			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 unit, chid,
			 nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			 subc, mthd, data);
	}

	nv_wr32(fifo, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	u32 mask = nv_rd32(fifo, 0x04014c + (unit * 0x2000));
	u32 stat = nv_rd32(fifo, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nv_rd32(fifo, 0x040120 + (unit * 0x2000)) & 0xfff;

	if (stat) {
		nv_error(fifo, "PBDMA%d:", unit);
		nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat);
		pr_cont("\n");
		nv_error(fifo, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
			 nv_rd32(fifo, 0x040150 + (unit * 0x2000)),
			 nv_rd32(fifo, 0x040154 + (unit * 0x2000)));
	}

	nv_wr32(fifo, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	u32 mask = nv_rd32(fifo, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
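		/*
		 * Wake anyone in gk104_fifo_runlist_update() waiting for
		 * this engine's runlist commit to complete.
		 */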
		wake_up(&fifo->engine[engn].wait);
		nv_wr32(fifo, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gk104_fifo *fifo = (void *)subdev;
	u32 mask = nv_rd32(fifo, 0x002140);
	u32 stat = nv_rd32(fifo, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nv_wr32(fifo, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nv_error(fifo, "PIO_ERROR\n");
		nv_wr32(fifo, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nv_wr32(fifo, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nv_wr32(fifo, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nv_error(fifo, "FB_FLUSH_TIMEOUT\n");
		nv_wr32(fifo, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nv_error(fifo, "LB_ERROR\n");
		nv_wr32(fifo, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nv_wr32(fifo, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nv_rd32(fifo, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nv_wr32(fifo, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nv_rd32(fifo, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nv_wr32(fifo, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nv_wr32(fifo, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nv_error(fifo, "INTR 0x%08x\n", stat);
		nv_mask(fifo, 0x002140, stat, 0x00000000);
		nv_wr32(fifo, 0x002100, stat);
	}
}

static void
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
}

static void
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
}

static const struct nvkm_event_func
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
};

int
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object;
	int ret;

	ret = nvkm_fifo_fini(&fifo->base, suspend);
	if (ret)
		return ret;

	/* allow mmu fault interrupts, even when we're not using fifo */
	nv_mask(fifo, 0x002140, 0x10000000, 0x10000000);
	return 0;
}

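/*
 * One-time PFIFO setup: enable every PBDMA unit, clear and unmask its
 * interrupts, point the hardware at the BAR mapping of the per-channel
 * user pages, and enable all top-level interrupt sources except the
 * engine (non-stall) interrupt, which gk104_fifo_uevent_init() enables
 * on demand.
 */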
int
gk104_fifo_init(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	/* enable all available PBDMA units */
	nv_wr32(fifo, 0x000204, 0xffffffff);
	fifo->spoon_nr = hweight32(nv_rd32(fifo, 0x000204));
	nv_debug(fifo, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nv_mask(fifo, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(fifo, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(fifo, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nv_wr32(fifo, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(fifo, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nv_wr32(fifo, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nv_wr32(fifo, 0x002100, 0xffffffff);
	nv_wr32(fifo, 0x002140, 0x7fffffff);
	return 0;
}

void
gk104_fifo_dtor(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	int i;

	nvkm_gpuobj_unmap(&fifo->user.bar);
	nvkm_gpuobj_ref(NULL, &fifo->user.mem);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]);
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]);
	}

	nvkm_fifo_destroy(&fifo->base);
}

int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gk104_fifo_impl *impl = (void *)oclass;
	struct gk104_fifo *fifo;
	int ret, i;

	ret = nvkm_fifo_create(parent, engine, oclass, 0,
			       impl->channels - 1, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->engine[i].wait);
	}

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gk104_fifo_intr;
	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
	nv_engine(fifo)->sclass = gk104_fifo_sclass;
	return 0;
}

struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
	},
	.channels = 4096,
}.base;