1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "gk104.h" 25 #include "changk104.h" 26 27 #include <core/client.h> 28 #include <core/enum.h> 29 #include <core/gpuobj.h> 30 #include <subdev/bar.h> 31 #include <engine/sw.h> 32 33 #include <nvif/class.h> 34 35 void 36 gk104_fifo_uevent_fini(struct nvkm_fifo *fifo) 37 { 38 struct nvkm_device *device = fifo->engine.subdev.device; 39 nvkm_mask(device, 0x002140, 0x80000000, 0x00000000); 40 } 41 42 void 43 gk104_fifo_uevent_init(struct nvkm_fifo *fifo) 44 { 45 struct nvkm_device *device = fifo->engine.subdev.device; 46 nvkm_mask(device, 0x002140, 0x80000000, 0x80000000); 47 } 48 49 void 50 gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) 51 { 52 struct gk104_fifo_engn *engn = &fifo->engine[engine]; 53 struct gk104_fifo_chan *chan; 54 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 55 struct nvkm_device *device = subdev->device; 56 struct nvkm_memory *cur; 57 int nr = 0; 58 59 mutex_lock(&subdev->mutex); 60 cur = engn->runlist[engn->cur_runlist]; 61 engn->cur_runlist = !engn->cur_runlist; 62 63 nvkm_kmap(cur); 64 list_for_each_entry(chan, &engn->chan, head) { 65 nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid); 66 nvkm_wo32(cur, (nr * 8) + 4, 0x00000000); 67 nr++; 68 } 69 nvkm_done(cur); 70 71 nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12); 72 nvkm_wr32(device, 0x002274, (engine << 20) | nr); 73 74 if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 + 75 (engine * 0x08)) & 0x00100000), 76 msecs_to_jiffies(2000)) == 0) 77 nvkm_error(subdev, "runlist %d update timeout\n", engine); 78 mutex_unlock(&subdev->mutex); 79 } 80 81 static inline struct nvkm_engine * 82 gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn) 83 { 84 struct nvkm_device *device = fifo->base.engine.subdev.device; 85 u64 subdevs = gk104_fifo_engine_subdev(engn); 86 if (subdevs) 87 return nvkm_device_engine(device, __ffs(subdevs)); 88 return NULL; 89 } 90 91 static void 92 gk104_fifo_recover_work(struct work_struct *work) 93 { 94 struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault); 95 struct nvkm_device *device = fifo->base.engine.subdev.device; 96 struct nvkm_engine *engine; 97 unsigned long flags; 98 u32 engn, engm = 0; 99 u64 mask, todo; 100 101 spin_lock_irqsave(&fifo->base.lock, flags); 102 mask = fifo->mask; 103 fifo->mask = 0ULL; 104 spin_unlock_irqrestore(&fifo->base.lock, flags); 105 106 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) 107 engm |= 1 << gk104_fifo_subdev_engine(engn); 108 nvkm_mask(device, 0x002630, engm, engm); 109 110 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { 111 if ((engine = nvkm_device_engine(device, engn))) { 112 nvkm_subdev_fini(&engine->subdev, false); 113 WARN_ON(nvkm_subdev_init(&engine->subdev)); 114 } 115 gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn)); 116 } 117 118 nvkm_wr32(device, 0x00262c, engm); 119 nvkm_mask(device, 0x002630, engm, 0x00000000); 120 } 121 122 static void 123 gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine, 124 struct gk104_fifo_chan *chan) 125 { 126 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 127 struct nvkm_device *device = subdev->device; 128 u32 chid = chan->base.chid; 129 130 nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n", 131 nvkm_subdev_name[engine->subdev.index], chid); 132 assert_spin_locked(&fifo->base.lock); 133 134 nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); 135 list_del_init(&chan->head); 136 chan->killed = true; 137 138 fifo->mask |= 1ULL << engine->subdev.index; 139 schedule_work(&fifo->fault); 140 } 141 142 static const struct nvkm_enum 143 gk104_fifo_bind_reason[] = { 144 { 0x01, "BIND_NOT_UNBOUND" }, 145 { 0x02, "SNOOP_WITHOUT_BAR1" }, 146 { 0x03, "UNBIND_WHILE_RUNNING" }, 147 { 0x05, "INVALID_RUNLIST" }, 148 { 0x06, "INVALID_CTX_TGT" }, 149 { 0x0b, "UNBIND_WHILE_PARKED" }, 150 {} 151 }; 152 153 static void 154 gk104_fifo_intr_bind(struct gk104_fifo *fifo) 155 { 156 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 157 struct nvkm_device *device = subdev->device; 158 u32 intr = nvkm_rd32(device, 0x00252c); 159 u32 code = intr & 0x000000ff; 160 const struct nvkm_enum *en = 161 nvkm_enum_find(gk104_fifo_bind_reason, code); 162 163 nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : ""); 164 } 165 166 static const struct nvkm_enum 167 gk104_fifo_sched_reason[] = { 168 { 0x0a, "CTXSW_TIMEOUT" }, 169 {} 170 }; 171 172 static void 173 gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) 174 { 175 struct nvkm_device *device = fifo->base.engine.subdev.device; 176 struct nvkm_engine *engine; 177 struct gk104_fifo_chan *chan; 178 unsigned long flags; 179 u32 engn; 180 181 spin_lock_irqsave(&fifo->base.lock, flags); 182 for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) { 183 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04)); 184 u32 busy = (stat & 0x80000000); 185 u32 next = (stat & 0x07ff0000) >> 16; 186 u32 chsw = (stat & 0x00008000); 187 u32 save = (stat & 0x00004000); 188 u32 load = (stat & 0x00002000); 189 u32 prev = (stat & 0x000007ff); 190 u32 chid = load ? next : prev; 191 (void)save; 192 193 if (busy && chsw) { 194 list_for_each_entry(chan, &fifo->engine[engn].chan, head) { 195 if (chan->base.chid == chid) { 196 engine = gk104_fifo_engine(fifo, engn); 197 if (!engine) 198 break; 199 gk104_fifo_recover(fifo, engine, chan); 200 break; 201 } 202 } 203 } 204 } 205 spin_unlock_irqrestore(&fifo->base.lock, flags); 206 } 207 208 static void 209 gk104_fifo_intr_sched(struct gk104_fifo *fifo) 210 { 211 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 212 struct nvkm_device *device = subdev->device; 213 u32 intr = nvkm_rd32(device, 0x00254c); 214 u32 code = intr & 0x000000ff; 215 const struct nvkm_enum *en = 216 nvkm_enum_find(gk104_fifo_sched_reason, code); 217 218 nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : ""); 219 220 switch (code) { 221 case 0x0a: 222 gk104_fifo_intr_sched_ctxsw(fifo); 223 break; 224 default: 225 break; 226 } 227 } 228 229 static void 230 gk104_fifo_intr_chsw(struct gk104_fifo *fifo) 231 { 232 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 233 struct nvkm_device *device = subdev->device; 234 u32 stat = nvkm_rd32(device, 0x00256c); 235 nvkm_error(subdev, "CHSW_ERROR %08x\n", stat); 236 nvkm_wr32(device, 0x00256c, stat); 237 } 238 239 static void 240 gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo) 241 { 242 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 243 struct nvkm_device *device = subdev->device; 244 u32 stat = nvkm_rd32(device, 0x00259c); 245 nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); 246 } 247 248 static const struct nvkm_enum 249 gk104_fifo_fault_engine[] = { 250 { 0x00, "GR", NULL, NVKM_ENGINE_GR }, 251 { 0x03, "IFB", NULL, NVKM_ENGINE_IFB }, 252 { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR }, 253 { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM }, 254 { 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO }, 255 { 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO }, 256 { 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO }, 257 { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD }, 258 { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP }, 259 { 0x13, "PERF" }, 260 { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC }, 261 { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 }, 262 { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 }, 263 { 0x17, "PMU" }, 264 { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC }, 265 { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 }, 266 {} 267 }; 268 269 static const struct nvkm_enum 270 gk104_fifo_fault_reason[] = { 271 { 0x00, "PDE" }, 272 { 0x01, "PDE_SIZE" }, 273 { 0x02, "PTE" }, 274 { 0x03, "VA_LIMIT_VIOLATION" }, 275 { 0x04, "UNBOUND_INST_BLOCK" }, 276 { 0x05, "PRIV_VIOLATION" }, 277 { 0x06, "RO_VIOLATION" }, 278 { 0x07, "WO_VIOLATION" }, 279 { 0x08, "PITCH_MASK_VIOLATION" }, 280 { 0x09, "WORK_CREATION" }, 281 { 0x0a, "UNSUPPORTED_APERTURE" }, 282 { 0x0b, "COMPRESSION_FAILURE" }, 283 { 0x0c, "UNSUPPORTED_KIND" }, 284 { 0x0d, "REGION_VIOLATION" }, 285 { 0x0e, "BOTH_PTES_VALID" }, 286 { 0x0f, "INFO_TYPE_POISONED" }, 287 {} 288 }; 289 290 static const struct nvkm_enum 291 gk104_fifo_fault_hubclient[] = { 292 { 0x00, "VIP" }, 293 { 0x01, "CE0" }, 294 { 0x02, "CE1" }, 295 { 0x03, "DNISO" }, 296 { 0x04, "FE" }, 297 { 0x05, "FECS" }, 298 { 0x06, "HOST" }, 299 { 0x07, "HOST_CPU" }, 300 { 0x08, "HOST_CPU_NB" }, 301 { 0x09, "ISO" }, 302 { 0x0a, "MMU" }, 303 { 0x0b, "MSPDEC" }, 304 { 0x0c, "MSPPP" }, 305 { 0x0d, "MSVLD" }, 306 { 0x0e, "NISO" }, 307 { 0x0f, "P2P" }, 308 { 0x10, "PD" }, 309 { 0x11, "PERF" }, 310 { 0x12, "PMU" }, 311 { 0x13, "RASTERTWOD" }, 312 { 0x14, "SCC" }, 313 { 0x15, "SCC_NB" }, 314 { 0x16, "SEC" }, 315 { 0x17, "SSYNC" }, 316 { 0x18, "GR_CE" }, 317 { 0x19, "CE2" }, 318 { 0x1a, "XV" }, 319 { 0x1b, "MMU_NB" }, 320 { 0x1c, "MSENC" }, 321 { 0x1d, "DFALCON" }, 322 { 0x1e, "SKED" }, 323 { 0x1f, "AFALCON" }, 324 {} 325 }; 326 327 static const struct nvkm_enum 328 gk104_fifo_fault_gpcclient[] = { 329 { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" }, 330 { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" }, 331 { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" }, 332 { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" }, 333 { 0x0c, "RAST" }, 334 { 0x0d, "GCC" }, 335 { 0x0e, "GPCCS" }, 336 { 0x0f, "PROP_0" }, 337 { 0x10, "PROP_1" }, 338 { 0x11, "PROP_2" }, 339 { 0x12, "PROP_3" }, 340 { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" }, 341 { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" }, 342 { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" }, 343 { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" }, 344 { 0x1f, "GPM" }, 345 { 0x20, "LTP_UTLB_0" }, 346 { 0x21, "LTP_UTLB_1" }, 347 { 0x22, "LTP_UTLB_2" }, 348 { 0x23, "LTP_UTLB_3" }, 349 { 0x24, "GPC_RGG_UTLB" }, 350 {} 351 }; 352 353 static void 354 gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) 355 { 356 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 357 struct nvkm_device *device = subdev->device; 358 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10)); 359 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10)); 360 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10)); 361 u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10)); 362 u32 gpc = (stat & 0x1f000000) >> 24; 363 u32 client = (stat & 0x00001f00) >> 8; 364 u32 write = (stat & 0x00000080); 365 u32 hub = (stat & 0x00000040); 366 u32 reason = (stat & 0x0000000f); 367 const struct nvkm_enum *er, *eu, *ec; 368 struct nvkm_engine *engine = NULL; 369 struct nvkm_fifo_chan *chan; 370 unsigned long flags; 371 char gpcid[8] = ""; 372 373 er = nvkm_enum_find(gk104_fifo_fault_reason, reason); 374 eu = nvkm_enum_find(gk104_fifo_fault_engine, unit); 375 if (hub) { 376 ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client); 377 } else { 378 ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client); 379 snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc); 380 } 381 382 if (eu) { 383 switch (eu->data2) { 384 case NVKM_SUBDEV_BAR: 385 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000); 386 break; 387 case NVKM_SUBDEV_INSTMEM: 388 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000); 389 break; 390 case NVKM_ENGINE_IFB: 391 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); 392 break; 393 default: 394 engine = nvkm_device_engine(device, eu->data2); 395 break; 396 } 397 } 398 399 chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags); 400 401 nvkm_error(subdev, 402 "%s fault at %010llx engine %02x [%s] client %02x [%s%s] " 403 "reason %02x [%s] on channel %d [%010llx %s]\n", 404 write ? "write" : "read", (u64)vahi << 32 | valo, 405 unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "", 406 reason, er ? er->name : "", chan ? chan->chid : -1, 407 (u64)inst << 12, 408 chan ? chan->object.client->name : "unknown"); 409 410 if (engine && chan) 411 gk104_fifo_recover(fifo, engine, (void *)chan); 412 nvkm_fifo_chan_put(&fifo->base, flags, &chan); 413 } 414 415 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { 416 { 0x00000001, "MEMREQ" }, 417 { 0x00000002, "MEMACK_TIMEOUT" }, 418 { 0x00000004, "MEMACK_EXTRA" }, 419 { 0x00000008, "MEMDAT_TIMEOUT" }, 420 { 0x00000010, "MEMDAT_EXTRA" }, 421 { 0x00000020, "MEMFLUSH" }, 422 { 0x00000040, "MEMOP" }, 423 { 0x00000080, "LBCONNECT" }, 424 { 0x00000100, "LBREQ" }, 425 { 0x00000200, "LBACK_TIMEOUT" }, 426 { 0x00000400, "LBACK_EXTRA" }, 427 { 0x00000800, "LBDAT_TIMEOUT" }, 428 { 0x00001000, "LBDAT_EXTRA" }, 429 { 0x00002000, "GPFIFO" }, 430 { 0x00004000, "GPPTR" }, 431 { 0x00008000, "GPENTRY" }, 432 { 0x00010000, "GPCRC" }, 433 { 0x00020000, "PBPTR" }, 434 { 0x00040000, "PBENTRY" }, 435 { 0x00080000, "PBCRC" }, 436 { 0x00100000, "XBARCONNECT" }, 437 { 0x00200000, "METHOD" }, 438 { 0x00400000, "METHODCRC" }, 439 { 0x00800000, "DEVICE" }, 440 { 0x02000000, "SEMAPHORE" }, 441 { 0x04000000, "ACQUIRE" }, 442 { 0x08000000, "PRI" }, 443 { 0x20000000, "NO_CTXSW_SEG" }, 444 { 0x40000000, "PBSEG" }, 445 { 0x80000000, "SIGNATURE" }, 446 {} 447 }; 448 449 static void 450 gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) 451 { 452 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 453 struct nvkm_device *device = subdev->device; 454 u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000)); 455 u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask; 456 u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000)); 457 u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000)); 458 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; 459 u32 subc = (addr & 0x00070000) >> 16; 460 u32 mthd = (addr & 0x00003ffc); 461 u32 show = stat; 462 struct nvkm_fifo_chan *chan; 463 unsigned long flags; 464 char msg[128]; 465 466 if (stat & 0x00800000) { 467 if (device->sw) { 468 if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data)) 469 show &= ~0x00800000; 470 } 471 nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008); 472 } 473 474 if (show) { 475 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show); 476 chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); 477 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] " 478 "subc %d mthd %04x data %08x\n", 479 unit, show, msg, chid, chan ? chan->inst->addr : 0, 480 chan ? chan->object.client->name : "unknown", 481 subc, mthd, data); 482 nvkm_fifo_chan_put(&fifo->base, flags, &chan); 483 } 484 485 nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat); 486 } 487 488 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = { 489 { 0x00000001, "HCE_RE_ILLEGAL_OP" }, 490 { 0x00000002, "HCE_RE_ALIGNB" }, 491 { 0x00000004, "HCE_PRIV" }, 492 { 0x00000008, "HCE_ILLEGAL_MTHD" }, 493 { 0x00000010, "HCE_ILLEGAL_CLASS" }, 494 {} 495 }; 496 497 static void 498 gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit) 499 { 500 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 501 struct nvkm_device *device = subdev->device; 502 u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000)); 503 u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask; 504 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; 505 char msg[128]; 506 507 if (stat) { 508 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat); 509 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n", 510 unit, stat, msg, chid, 511 nvkm_rd32(device, 0x040150 + (unit * 0x2000)), 512 nvkm_rd32(device, 0x040154 + (unit * 0x2000))); 513 } 514 515 nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat); 516 } 517 518 static void 519 gk104_fifo_intr_runlist(struct gk104_fifo *fifo) 520 { 521 struct nvkm_device *device = fifo->base.engine.subdev.device; 522 u32 mask = nvkm_rd32(device, 0x002a00); 523 while (mask) { 524 u32 engn = __ffs(mask); 525 wake_up(&fifo->engine[engn].wait); 526 nvkm_wr32(device, 0x002a00, 1 << engn); 527 mask &= ~(1 << engn); 528 } 529 } 530 531 static void 532 gk104_fifo_intr_engine(struct gk104_fifo *fifo) 533 { 534 nvkm_fifo_uevent(&fifo->base); 535 } 536 537 void 538 gk104_fifo_intr(struct nvkm_fifo *base) 539 { 540 struct gk104_fifo *fifo = gk104_fifo(base); 541 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 542 struct nvkm_device *device = subdev->device; 543 u32 mask = nvkm_rd32(device, 0x002140); 544 u32 stat = nvkm_rd32(device, 0x002100) & mask; 545 546 if (stat & 0x00000001) { 547 gk104_fifo_intr_bind(fifo); 548 nvkm_wr32(device, 0x002100, 0x00000001); 549 stat &= ~0x00000001; 550 } 551 552 if (stat & 0x00000010) { 553 nvkm_error(subdev, "PIO_ERROR\n"); 554 nvkm_wr32(device, 0x002100, 0x00000010); 555 stat &= ~0x00000010; 556 } 557 558 if (stat & 0x00000100) { 559 gk104_fifo_intr_sched(fifo); 560 nvkm_wr32(device, 0x002100, 0x00000100); 561 stat &= ~0x00000100; 562 } 563 564 if (stat & 0x00010000) { 565 gk104_fifo_intr_chsw(fifo); 566 nvkm_wr32(device, 0x002100, 0x00010000); 567 stat &= ~0x00010000; 568 } 569 570 if (stat & 0x00800000) { 571 nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n"); 572 nvkm_wr32(device, 0x002100, 0x00800000); 573 stat &= ~0x00800000; 574 } 575 576 if (stat & 0x01000000) { 577 nvkm_error(subdev, "LB_ERROR\n"); 578 nvkm_wr32(device, 0x002100, 0x01000000); 579 stat &= ~0x01000000; 580 } 581 582 if (stat & 0x08000000) { 583 gk104_fifo_intr_dropped_fault(fifo); 584 nvkm_wr32(device, 0x002100, 0x08000000); 585 stat &= ~0x08000000; 586 } 587 588 if (stat & 0x10000000) { 589 u32 mask = nvkm_rd32(device, 0x00259c); 590 while (mask) { 591 u32 unit = __ffs(mask); 592 gk104_fifo_intr_fault(fifo, unit); 593 nvkm_wr32(device, 0x00259c, (1 << unit)); 594 mask &= ~(1 << unit); 595 } 596 stat &= ~0x10000000; 597 } 598 599 if (stat & 0x20000000) { 600 u32 mask = nvkm_rd32(device, 0x0025a0); 601 while (mask) { 602 u32 unit = __ffs(mask); 603 gk104_fifo_intr_pbdma_0(fifo, unit); 604 gk104_fifo_intr_pbdma_1(fifo, unit); 605 nvkm_wr32(device, 0x0025a0, (1 << unit)); 606 mask &= ~(1 << unit); 607 } 608 stat &= ~0x20000000; 609 } 610 611 if (stat & 0x40000000) { 612 gk104_fifo_intr_runlist(fifo); 613 stat &= ~0x40000000; 614 } 615 616 if (stat & 0x80000000) { 617 nvkm_wr32(device, 0x002100, 0x80000000); 618 gk104_fifo_intr_engine(fifo); 619 stat &= ~0x80000000; 620 } 621 622 if (stat) { 623 nvkm_error(subdev, "INTR %08x\n", stat); 624 nvkm_mask(device, 0x002140, stat, 0x00000000); 625 nvkm_wr32(device, 0x002100, stat); 626 } 627 } 628 629 void 630 gk104_fifo_fini(struct nvkm_fifo *base) 631 { 632 struct gk104_fifo *fifo = gk104_fifo(base); 633 struct nvkm_device *device = fifo->base.engine.subdev.device; 634 flush_work(&fifo->fault); 635 /* allow mmu fault interrupts, even when we're not using fifo */ 636 nvkm_mask(device, 0x002140, 0x10000000, 0x10000000); 637 } 638 639 int 640 gk104_fifo_oneinit(struct nvkm_fifo *base) 641 { 642 struct gk104_fifo *fifo = gk104_fifo(base); 643 struct nvkm_device *device = fifo->base.engine.subdev.device; 644 int ret, i; 645 646 for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) { 647 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 648 0x8000, 0x1000, false, 649 &fifo->engine[i].runlist[0]); 650 if (ret) 651 return ret; 652 653 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 654 0x8000, 0x1000, false, 655 &fifo->engine[i].runlist[1]); 656 if (ret) 657 return ret; 658 659 init_waitqueue_head(&fifo->engine[i].wait); 660 INIT_LIST_HEAD(&fifo->engine[i].chan); 661 } 662 663 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 664 fifo->base.nr * 0x200, 0x1000, true, 665 &fifo->user.mem); 666 if (ret) 667 return ret; 668 669 ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12, 670 &fifo->user.bar); 671 if (ret) 672 return ret; 673 674 nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0); 675 return 0; 676 } 677 678 void 679 gk104_fifo_init(struct nvkm_fifo *base) 680 { 681 struct gk104_fifo *fifo = gk104_fifo(base); 682 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 683 struct nvkm_device *device = subdev->device; 684 int i; 685 686 /* enable all available PBDMA units */ 687 nvkm_wr32(device, 0x000204, 0xffffffff); 688 fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204)); 689 nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr); 690 691 /* PBDMA[n] */ 692 for (i = 0; i < fifo->spoon_nr; i++) { 693 nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); 694 nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ 695 nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ 696 } 697 698 /* PBDMA[n].HCE */ 699 for (i = 0; i < fifo->spoon_nr; i++) { 700 nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */ 701 nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */ 702 } 703 704 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12); 705 706 nvkm_wr32(device, 0x002100, 0xffffffff); 707 nvkm_wr32(device, 0x002140, 0x7fffffff); 708 } 709 710 void * 711 gk104_fifo_dtor(struct nvkm_fifo *base) 712 { 713 struct gk104_fifo *fifo = gk104_fifo(base); 714 int i; 715 716 nvkm_vm_put(&fifo->user.bar); 717 nvkm_memory_del(&fifo->user.mem); 718 719 for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) { 720 nvkm_memory_del(&fifo->engine[i].runlist[1]); 721 nvkm_memory_del(&fifo->engine[i].runlist[0]); 722 } 723 724 return fifo; 725 } 726 727 int 728 gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, 729 int index, int nr, struct nvkm_fifo **pfifo) 730 { 731 struct gk104_fifo *fifo; 732 733 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) 734 return -ENOMEM; 735 INIT_WORK(&fifo->fault, gk104_fifo_recover_work); 736 *pfifo = &fifo->base; 737 738 return nvkm_fifo_ctor(func, device, index, nr, &fifo->base); 739 } 740 741 static const struct nvkm_fifo_func 742 gk104_fifo = { 743 .dtor = gk104_fifo_dtor, 744 .oneinit = gk104_fifo_oneinit, 745 .init = gk104_fifo_init, 746 .fini = gk104_fifo_fini, 747 .intr = gk104_fifo_intr, 748 .uevent_init = gk104_fifo_uevent_init, 749 .uevent_fini = gk104_fifo_uevent_fini, 750 .chan = { 751 &gk104_fifo_gpfifo_oclass, 752 NULL 753 }, 754 }; 755 756 int 757 gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) 758 { 759 return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo); 760 } 761