/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __NOUVEAU_DMA_H__
#define __NOUVEAU_DMA_H__

#ifndef NOUVEAU_DMA_DEBUG
#define NOUVEAU_DMA_DEBUG 0
#endif

void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
		   int delta, int length);

/*
 * There's a hw race condition where you can't jump to your PUT offset;
 * to avoid this we jump to offset + SKIPS and fill the difference with
 * NOPs.
 *
 * xf86-video-nv configures the DMA fetch size to 32 bytes and uses a
 * SKIPS value of 8.  Assuming the race condition has to do with writing
 * into the fetch area, and since we configure a fetch size of 128 bytes,
 * we need a correspondingly larger SKIPS value.
 */
#define NOUVEAU_DMA_SKIPS (128 / 4)

/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
	NvSubM2MF	= 0,
	NvSubSw		= 1,
	NvSub2D		= 2,
	NvSubCtxSurf2D	= 2,
	NvSubGdiRect	= 3,
	NvSubImageBlit	= 4
};

/* Object handles. */
enum {
	NvM2MF		= 0x80000001,
	NvDmaFB		= 0x80000002,
	NvDmaTT		= 0x80000003,
	NvDmaVRAM	= 0x80000004,
	NvDmaGART	= 0x80000005,
	NvNotify0	= 0x80000006,
	Nv2D		= 0x80000007,
	NvCtxSurf2D	= 0x80000008,
	NvRop		= 0x80000009,
	NvImagePatt	= 0x8000000a,
	NvClipRect	= 0x8000000b,
	NvGdiRect	= 0x8000000c,
	NvImageBlit	= 0x8000000d,
	NvSw		= 0x8000000e,
	NvSema		= 0x8000000f,
	NvEvoSema0	= 0x80000010,
	NvEvoSema1	= 0x80000011,

	/* G80+ display objects */
	NvEvoVRAM	= 0x01000000,
	NvEvoFB16	= 0x01000001,
	NvEvoFB32	= 0x01000002,
	NvEvoVRAM_LP	= 0x01000003,
	NvEvoSync	= 0xcafe0000
};

#define NV_MEMORY_TO_MEMORY_FORMAT				0x00000039
#define NV_MEMORY_TO_MEMORY_FORMAT_NAME				0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF			0x00000050
#define NV_MEMORY_TO_MEMORY_FORMAT_NOP				0x00000100
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY			0x00000104
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE		0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN	0x00000001
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY			0x00000180
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE			0x00000184
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN			0x0000030c

#define NV50_MEMORY_TO_MEMORY_FORMAT				0x00005039
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200			0x00000200
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C			0x0000021c
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH		0x00000238
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH		0x0000023c

static __must_check inline int
RING_SPACE(struct nouveau_channel *chan, int size)
{
	int ret;

	ret = nouveau_dma_wait(chan, 1, size);
	if (ret)
		return ret;

	chan->dma.free -= size;
	return 0;
}

static inline void
OUT_RING(struct nouveau_channel *chan, int data)
{
	if (NOUVEAU_DMA_DEBUG) {
		NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
			chan->id, chan->dma.cur << 2, data);
	}

	nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
}

extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);

static inline void
BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
{
	OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
}

static inline void
BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
{
	OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
}
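
/*
 * For illustration: with the helpers above, a call like
 *
 *	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 *
 * emits the single header dword
 *
 *	(NvSubM2MF << 13) | (1 << 18) | 0x0100 == 0x00040100
 *
 * meaning "one data word follows, for method 0x0100 on subchannel 0";
 * the data word itself is then written with OUT_RING() or OUT_RINGp().
 */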
#define WRITE_PUT(val) do { \
	DRM_MEMORYBARRIER(); \
	nouveau_bo_rd32(chan->pushbuf_bo, 0); \
	nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \
} while (0)

static inline void
FIRE_RING(struct nouveau_channel *chan)
{
	if (NOUVEAU_DMA_DEBUG) {
		NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
			chan->id, chan->dma.cur << 2);
	}

	if (chan->dma.cur == chan->dma.put)
		return;
	chan->accel_done = true;

	if (chan->dma.ib_max) {
		nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
			      (chan->dma.cur - chan->dma.put) << 2);
	} else {
		WRITE_PUT(chan->dma.cur);
	}

	chan->dma.put = chan->dma.cur;
}

static inline void
WIND_RING(struct nouveau_channel *chan)
{
	chan->dma.cur = chan->dma.put;
}

#endif
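
/*
 * Minimal usage sketch (illustrative only; assumes the channel's
 * subchannels have already been bound to the objects listed above):
 *
 *	ret = RING_SPACE(chan, 2);
 *	if (ret)
 *		return ret;
 *
 *	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 *	OUT_RING  (chan, 0);
 *	FIRE_RING (chan);
 *
 * RING_SPACE() waits for and reserves room in the push buffer, BEGIN_RING()
 * and OUT_RING() fill in the reserved words, and FIRE_RING() hands the new
 * words to the hardware (via nv50_dma_push() on IB-style channels, or by
 * bumping PUT otherwise).  On an error after space has been reserved,
 * WIND_RING() rewinds the write pointer to the last submitted position.
 */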