/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "channv50.h"

#include <core/gpuobj.h>

static void
nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_memory *cur;
	int i, p;

	/* runlists are double-buffered: build the new list in the buffer
	 * that isn't currently in use, then point the hardware at it
	 */
	cur = fifo->runlist[fifo->cur_runlist];
	fifo->cur_runlist = !fifo->cur_runlist;

	/* add every channel whose context entry is marked as active */
	nvkm_kmap(cur);
	for (i = 0, p = 0; i < fifo->base.nr; i++) {
		if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
			nvkm_wo32(cur, p++ * 4, i);
	}
	nvkm_done(cur);

	/* submit the new runlist: base address in 4KiB units, then length */
	nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x0032ec, p);
	nvkm_wr32(device, 0x002500, 0x00000101);
}

void
nv50_fifo_runlist_update(struct nv50_fifo *fifo)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	nv50_fifo_runlist_update_locked(fifo);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

int
nv50_fifo_oneinit(struct nvkm_fifo *base)
{
	struct nv50_fifo *fifo = nv50_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	/* allocate both runlist buffers (128 4-byte entries each) */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
			      false, &fifo->runlist[0]);
	if (ret)
		return ret;

	return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
			       false, &fifo->runlist[1]);
}

void
nv50_fifo_init(struct nvkm_fifo *base)
{
	struct nv50_fifo *fifo = nv50_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* reset PFIFO by toggling its bit in the PMC enable register */
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
	nvkm_wr32(device, 0x002044, 0x01003fff);

	/* acknowledge any pending interrupts, then enable them */
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0xbfffffff);

	/* clear all channel context entries and build an initial runlist */
	for (i = 0; i < 128; i++)
		nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
	nv50_fifo_runlist_update_locked(fifo);

	/* enable command fetch/execution and PFIFO itself */
	nvkm_wr32(device, 0x003200, 0x00000001);
	nvkm_wr32(device, 0x003250, 0x00000001);
	nvkm_wr32(device, 0x002500, 0x00000001);
}

void *
nv50_fifo_dtor(struct nvkm_fifo *base)
{
	struct nv50_fifo *fifo = nv50_fifo(base);
	nvkm_memory_del(&fifo->runlist[1]);
	nvkm_memory_del(&fifo->runlist[0]);
	return fifo;
}

int
nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       int index, struct nvkm_fifo **pfifo)
{
	struct nv50_fifo *fifo;
	int ret;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	*pfifo = &fifo->base;

	ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base);
	if (ret)
		return ret;

	set_bit(0, fifo->base.mask); /* PIO channel */
	set_bit(127, fifo->base.mask); /* inactive channel */
	return 0;
}

static const struct nvkm_fifo_func
nv50_fifo = {
	.dtor = nv50_fifo_dtor,
	.oneinit = nv50_fifo_oneinit,
	.init = nv50_fifo_init,
	.intr = nv04_fifo_intr,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.chan = {
		&nv50_fifo_dma_oclass,
		&nv50_fifo_gpfifo_oclass,
		NULL
	},
};

int
nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return nv50_fifo_new_(&nv50_fifo, device, index, pfifo);
}