/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_vmm.h"

#include <nvif/user.h>

void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
	uint64_t val;

	val = nvif_rd32(&chan->user, chan->user_get);
	if (chan->user_get_hi)
		val |= (uint64_t)nvif_rd32(&chan->user, chan->user_get_hi) << 32;

	/* reset counter as long as GET is still advancing, this is
	 * to avoid misdetecting a GPU lockup if the GPU happens to
	 * just be processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0) {
		udelay(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->push.addr ||
	    val > chan->push.addr + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->push.addr) >> 2;
}

void
nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
{
	struct nvif_user *user = &chan->drm->client.device.user;
	struct nouveau_bo *pb = chan->push.buffer;
	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;

	BUG_ON(chan->dma.ib_free < 1);

	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

	mb();
	/* Flush writes. */
	nouveau_bo_rd32(pb, 0);

	nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
	if (user->func && user->func->doorbell)
		user->func->doorbell(user, chan->token);
	chan->dma.ib_free--;
}

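/* Wait for at least @count free entries in the IB (indirect buffer)
 * ring.  GET is re-read from the channel's USER registers on each pass,
 * and the timeout counter is reset whenever it advances, so a slow but
 * still-running GPU operation isn't misdetected as a lockup (the same
 * idea as READ_GET() above).
 *
 * Returns 0 on success, -EBUSY if the ring fails to drain in time.
 */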
static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (chan->dma.ib_free < count) {
		uint32_t get = nvif_rd32(&chan->user, 0x88);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		if ((++cnt & 0xff) == 0) {
			udelay(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		chan->dma.ib_free = get - chan->dma.ib_put;
		if (chan->dma.ib_free <= 0)
			chan->dma.ib_free += chan->dma.ib_max;
	}

	return 0;
}

static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
	uint64_t prev_get = 0;
	int ret, cnt = 0;

	ret = nv50_dma_push_wait(chan, slots + 1);
	if (unlikely(ret))
		return ret;

	while (chan->dma.free < count) {
		int get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get < 0)) {
			if (get == -EINVAL)
				continue;

			return get;
		}

		if (get <= chan->dma.cur) {
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= count)
				break;

			FIRE_RING(chan);
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get < 0)) {
					if (get == -EINVAL)
						continue;
					return get;
				}
			} while (get == 0);
			chan->dma.cur = 0;
			chan->dma.put = 0;
		}

		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}

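/* Wait for @size dwords of free space in the main push buffer.  On
 * channels using an indirect buffer (ib_max != 0) this also waits for
 * @slots + 1 free IB entries via nv50_dma_wait(); otherwise the
 * pre-NV50 path below manages GET/PUT within a single ring, emitting a
 * jump back to the start of the push buffer when it runs out of room
 * at the end.
 *
 * Returns 0 on success, -EBUSY if the GPU fails to make progress.
 */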
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
	uint64_t prev_get = 0;
	int cnt = 0, get;

	if (chan->dma.ib_max)
		return nv50_dma_wait(chan, slots, size);

	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* loop until we have a usable GET pointer.  the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring,
		 * discard these values until something sensible is seen.
		 *
		 * the other case we discard GET is while the GPU is fetching
		 * from the SKIPS area, so the code below doesn't have to deal
		 * with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT) so we have free space up until
			 * the end of the push buffer
			 *
			 * we can only hit that path once per call due to
			 * looping back to the beginning of the push buffer,
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET==PUT, in which case the below conditional will
			 * always succeed and break us out of the wait loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push buffer,
			 * instruct the GPU to jump back to the start right
			 * after processing the currently pending commands.
			 */
			OUT_RING(chan, chan->push.addr | 0x20000000);

			/* wait for GET to depart from the skips area.
			 * prevents writing GET==PUT and causing a race
			 * condition that causes us to think the GPU is
			 * idle when it's not.
			 */
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get == -EBUSY))
					return -EBUSY;
				if (unlikely(get == -EINVAL))
					continue;
			} while (get <= NOUVEAU_DMA_SKIPS);
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur =
				chan->dma.put = NOUVEAU_DMA_SKIPS;
		}

		/* engine fetching ahead of us, we have space up until the
		 * current GET pointer.  the "- 1" is to ensure there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it.  we can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}

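/* Rough usage sketch (illustration only, not part of the driver):
 * callers normally go through the wrappers in nouveau_dma.h, which sit
 * on top of nouveau_dma_wait() and the chan->dma cursor maintained in
 * this file.  Assuming the usual RING_SPACE()/OUT_RING()/FIRE_RING()
 * helper names:
 *
 *	ret = RING_SPACE(chan, 2);
 *	if (ret)
 *		return ret;
 *	OUT_RING(chan, method_header);
 *	OUT_RING(chan, data);
 *	FIRE_RING(chan);
 *
 * RING_SPACE() reserves space in the ring (waiting via
 * nouveau_dma_wait() if needed), OUT_RING() appends dwords at dma.cur,
 * and FIRE_RING() publishes the new PUT pointer so the GPU starts
 * fetching.  method_header and data are placeholders for whatever
 * class method is being emitted.
 */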