1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"

#include "regsnv04.h"

#include <core/ramht.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>

#include <nvif/class.h>

/* Initialise this channel's RAMFC (FIFO context) entry in instmem.
 *
 * NV40 uses a 128-byte RAMFC slot per channel.  Both DMA_PUT (+0x00) and
 * DMA_GET (+0x04) are seeded with the initial pushbuf offset, the pushbuf
 * ctxdma instance (addr >> 4) goes at +0x0c, the DMA fetch configuration
 * at +0x18 and the timeslice at +0x3c.
 *
 * The length/devm/priv parameters are part of the common ramfc_write
 * interface but unused on this generation.  Always returns 0.
 */
static int
nv40_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
	struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
	const u32 base = chan->id * 128;

	/* Remembered so nv40_ectx_bind() can patch engine-context words later. */
	chan->ramfc_offset = base;

	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, base + 0x00, offset);
	nvkm_wo32(ramfc, base + 0x04, offset);
	nvkm_wo32(ramfc, base + 0x0c, chan->push->addr >> 4);
	nvkm_wo32(ramfc, base + 0x18, 0x30000000 |
				      NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
				      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
				      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
				      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_wo32(ramfc, base + 0x3c, 0x0001ffff);
	nvkm_done(ramfc);
	return 0;
}

/* RAMFC layout for NV40: maps each RAMFC word (or bitfield) to the PFIFO
 * register it shadows, used by common code when saving/restoring channel
 * state.  Entry fields follow struct nvkm_ramfc_layout (bit count, shifts,
 * RAMFC offset, register) -- see its declaration for exact field order.
 * Terminated by the empty sentinel entry.
 */
static const struct nvkm_chan_func_ramfc
nv40_chan_ramfc = {
	.layout = (const struct nvkm_ramfc_layout[]) {
		{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
		{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
		{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
		{ 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
		{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
		{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
		{ 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
		{  2, 28, 0x18, 28, 0x002058 },
		{ 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
		{ 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
		{ 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
		{ 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
		{ 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
		{ 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
		{ 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
		{ 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
		{ 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
		/* Raw register offsets below have no named #define; presumably
		 * NV40-specific engine-context/config registers -- see the
		 * nouveau hwdocs to confirm. */
		{ 32, 0, 0x40, 0, 0x0032e4 },
		{ 32, 0, 0x44, 0, 0x0032e8 },
		{ 32, 0, 0x4c, 0, 0x002088 },
		{ 32, 0, 0x50, 0, 0x003300 },
		{ 32, 0, 0x54, 0, 0x00330c },
		{}
	},
	.write = nv40_chan_ramfc_write,
	.clear = nv04_chan_ramfc_clear,
	.ctxdma = true,
};

/* USERD (user-visible channel doorbell/control area): exposed through
 * BAR 0 at offset 0xc00000, 0x1000 bytes per channel.
 */
static const struct nvkm_chan_func_userd
nv40_chan_userd = {
	.bar = 0,
	.base = 0xc00000,
	.size = 0x001000,
};

/* Channel vfuncs: NV40-specific userd/ramfc, otherwise reuses the NV04
 * instance/start/stop implementations.
 */
static const struct nvkm_chan_func
nv40_chan = {
	.inst = &nv04_chan_inst,
	.userd = &nv40_chan_userd,
	.ramfc = &nv40_chan_ramfc,
	.start = nv04_chan_start,
	.stop = nv04_chan_stop,
};

/* Add an engine object to the global RAMHT hash table so PFIFO can resolve
 * its handle.  The context word encodes channel id (bit 23+) and engine id
 * (bits 20-22).  Serialised by the fifo mutex; returns the hash slot from
 * nvkm_ramht_insert() (negative errno on failure).
 */
static int
nv40_eobj_ramht_add(struct nvkm_engn *engn, struct nvkm_object *eobj, struct nvkm_chan *chan)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	struct nvkm_instmem *imem = fifo->engine.subdev.device->imem;
	u32 context = chan->id << 23 | engn->id << 20;
	int hash;

	mutex_lock(&fifo->mutex);
	hash = nvkm_ramht_insert(imem->ramht, eobj, chan->id, 4, eobj->handle, context);
	mutex_unlock(&fifo->mutex);
	return hash;
}

/* Bind (cctx != NULL) or unbind (cctx == NULL, inst stays 0) an engine
 * context to a channel.  Only GR is supported everywhere; MPEG requires
 * chipset >= 0x44.  The matching register/RAMFC-offset pair per engine
 * comes from the switch below.
 *
 * PFIFO caches are disabled (0x002500 bit 0) around the update.  If the
 * channel is the one currently resident in PFIFO (id read back from
 * 0x003204), the live engine-context register is updated directly as well
 * as the channel's saved RAMFC word.
 */
static void
nv40_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_memory *ramfc = device->imem->ramfc;
	u32 inst = 0x00000000, reg, ctx;
	int chid;

	switch (engn->engine->subdev.type) {
	case NVKM_ENGINE_GR:
		reg = 0x0032e0;
		ctx = 0x38;
		break;
	case NVKM_ENGINE_MPEG:
		if (WARN_ON(device->chipset < 0x44))
			return;
		reg = 0x00330c;
		ctx = 0x54;
		break;
	default:
		WARN_ON(1);
		return;
	}

	if (cctx)
		inst = cctx->vctx->inst->addr >> 4;

	spin_lock_irq(&fifo->lock);
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	chid = nvkm_rd32(device, 0x003204) & (fifo->chid->nr - 1);
	if (chid == chan->id)
		nvkm_wr32(device, reg, inst);

	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, chan->ramfc_offset + ctx, inst);
	nvkm_done(ramfc);

	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irq(&fifo->lock);
}

/* Engine vfuncs for HW engines: NV40 bind, NV40 RAMHT add, NV04 RAMHT del. */
static const struct nvkm_engn_func
nv40_engn = {
	.bind = nv40_ectx_bind,
	.ramht_add = nv40_eobj_ramht_add,
	.ramht_del = nv04_eobj_ramht_del,
};

/* SW engine variant: no context binding, RAMHT handling only. */
static const struct nvkm_engn_func
nv40_engn_sw = {
	.ramht_add = nv40_eobj_ramht_add,
	.ramht_del = nv04_eobj_ramht_del,
};

/* Bring up PFIFO: point it at RAMHT/RAMRO/RAMFC, clear and enable
 * interrupts, and enable the cache pusher/puller.
 */
static void
nv40_fifo_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_fb *fb = device->fb;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_ramht *ramht = imem->ramht;
	struct nvkm_memory *ramro = imem->ramro;
	struct nvkm_memory *ramfc = imem->ramfc;

	/* Undocumented here; NOTE(review): magic setup carried over from the
	 * original driver -- consult nouveau hwdocs before touching. */
	nvkm_wr32(device, 0x002040, 0x000000ff);
	nvkm_wr32(device, 0x002044, 0x2101ffff);
	nvkm_wr32(device, 0x002058, 0x00000001);

	/* RAMHT config: search depth 128, size (bits - 9), base address. */
	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				    ((ramht->bits - 9) << 16) |
				    (ramht->gpuobj->addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);

	/* RAMFC location is programmed differently per chipset family. */
	switch (device->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		nvkm_wr32(device, 0x002230, 0x00000001);
		fallthrough;
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x48:
		nvkm_wr32(device, 0x002220, 0x00030002);
		break;
	default:
		/* RAMFC lives near the top of VRAM (last 512KiB window). */
		nvkm_wr32(device, 0x002230, 0x00000000);
		nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
					      nvkm_memory_addr(ramfc)) >> 16) |
					    0x00030000);
		break;
	}

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);

	/* Ack any stale interrupts, then enable all of them. */
	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}

/* Top-level FIFO description for NV40: mostly NV04/NV10 common code with
 * the NV40-specific init, engine binding and channel class above.
 */
static const struct nvkm_fifo_func
nv40_fifo = {
	.chid_nr = nv10_fifo_chid_nr,
	.chid_ctor = nv04_fifo_chid_ctor,
	.runl_ctor = nv04_fifo_runl_ctor,
	.init = nv40_fifo_init,
	.intr = nv04_fifo_intr,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.runl = &nv04_runl,
	.engn = &nv40_engn,
	.engn_sw = &nv40_engn_sw,
	.cgrp = {{ }, &nv04_cgrp },
	.chan = {{ 0, 0, NV40_CHANNEL_DMA }, &nv40_chan },
};

/* Constructor entry point: instantiate the common fifo with NV40 vfuncs. */
int
nv40_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_fifo **pfifo)
{
	return nvkm_fifo_new_(&nv40_fifo, device, type, inst, pfifo);
}