/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <subdev/instmem/nv04.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/
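/* Bind an object into the channel's RAMHT so the puller can resolve its
 * handle.  The context word packs a valid bit, the channel id, an engine
 * selector and the object's instance address.  As an illustrative example
 * (hypothetical addresses), a GR object at instance 0x23450 bound on
 * channel 2 yields:
 *
 *	0x80000000	valid
 *	0x02000000	chid 2 << 24
 *	0x00010000	NVDEV_ENGINE_GR
 *	0x00002345	0x23450 >> 4
 *	----------
 *	0x82012345
 */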
int
nv04_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW:
		context |= 0x00000000;
		break;
	case NVDEV_ENGINE_GR:
		context |= 0x00010000;
		break;
	case NVDEV_ENGINE_MPEG:
		context |= 0x00020000;
		break;
	default:
		return -EINVAL;
	}

	context |= 0x80000000; /* valid */
	context |= chid << 24;

	mutex_lock(&nv_subdev(fifo)->mutex);
	ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context);
	mutex_unlock(&nv_subdev(fifo)->mutex);
	return ret;
}

void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	mutex_lock(&nv_subdev(fifo)->mutex);
	nvkm_ramht_remove(fifo->ramht, cookie);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

int
nv04_fifo_context_attach(struct nvkm_object *parent,
			 struct nvkm_object *object)
{
	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
	return 0;
}
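/* Each channel owns a 32-byte RAMFC slot (chan->ramfc = chid * 32).  The
 * constructor below seeds DMA_PUT/DMA_GET with the requested offset,
 * points DMA_INSTANCE at the pushbuf and programs the fetch parameters,
 * leaving the remaining words listed in nv04_ramfc[] zeroed.
 */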
static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nv04_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	nvkm_kmap(fifo->ramfc);
	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x10,
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_done(fifo->ramfc);
	return 0;
}

void
nv04_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct ramfc_desc *c = fifo->ramfc_desc;

	nvkm_kmap(fifo->ramfc);
	do {
		nvkm_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000);
	} while ((++c)->bits);
	nvkm_done(fifo->ramfc);

	nvkm_fifo_channel_destroy(&chan->base);
}

int
nv04_fifo_chan_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *fctx = fifo->ramfc;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	if (chid == chan->base.chid) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		c = fifo->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		c = fifo->ramfc_desc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

int
nv04_fifo_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv04_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return 0;
}

static struct nvkm_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/
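/* Callers reach these through the fifo->base.pause/start hooks (wired up
 * in nv04_fifo_ctor) to quiesce the puller around register surgery; a
 * typical (illustrative) pattern:
 *
 *	unsigned long flags;
 *	fifo->pause(fifo, &flags);
 *	...prod PFIFO while nothing is being pulled...
 *	fifo->start(fifo, &flags);
 */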
void
nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

void
nv04_fifo_start(struct nvkm_fifo *obj, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}
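/* CACHE1 method addresses pack the subchannel in bits 15:13 and the
 * method offset in bits 12:2, so e.g. an (illustrative) addr of 0x6040
 * decodes to subc 3, mthd 0x0040.
 */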
static bool
nv04_fifo_swmthd(struct nv04_fifo *fifo, u32 chid, u32 addr, u32 data)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	unsigned long flags;
	u32 engine;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		bind = nvkm_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nvkm_mask(device, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nvkm_namedb_put(bind);
		break;
	default:
		engine = nvkm_rd32(device, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nvkm_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return handled;
}

static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!nv04_fifo_swmthd(fifo, chid, mthd, data)) {
		const char *client_name =
			nvkm_client_name_for_fifo_chid(&fifo->base, chid);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
			   data);
	}

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}

static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	const char *client_name;

	client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid);

	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, client_name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
				   "state %08x (err: %s) push %08x\n",
			   chid, client_name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
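/* Top-level PFIFO interrupt handler.  Reassignment (NV03_PFIFO_CACHES) is
 * held off while servicing so CACHE1 state can't shift underneath us, and
 * any status bits we don't recognise get masked off afterwards to avoid
 * an interrupt storm.
 */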
void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = subdev->device;
	struct nv04_fifo *fifo = (void *)subdev;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}

static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_instmem *imem = nv04_instmem(parent);
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	nvkm_ramht_ref(imem->ramht, &fifo->ramht);
	nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
	nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv04_fifo_cclass;
	nv_engine(fifo)->sclass = nv04_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv04_ramfc;
	return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	nvkm_gpuobj_ref(NULL, &fifo->ramfc);
	nvkm_gpuobj_ref(NULL, &fifo->ramro);
	nvkm_ramht_ref(NULL, &fifo->ramht);
	nvkm_fifo_destroy(&fifo->base);
}
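/* The NV03_PFIFO_RAMHT word written below packs a search-depth selector
 * (0x03, "search 128"), the hash-table size as log2(entries) - 9 in bits
 * 17:16, and the table's base address >> 8.  For an (illustrative)
 * 512-entry RAMHT at instance address 0x10000 this works out to:
 *
 *	(0x03 << 24) | ((9 - 9) << 16) | (0x10000 >> 8) = 0x03000100
 */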
int
nv04_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((fifo->ramht->bits - 9) << 16) |
				        (fifo->ramht->gpuobj.addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	return 0;
}

struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};
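/* The device chipset tables pick up nv04_fifo_oclass to instantiate PFIFO
 * on NV04-generation boards; the hookup looks something like this
 * (illustrative):
 *
 *	device->oclass[NVDEV_ENGINE_FIFO] = nv04_fifo_oclass;
 */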