/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __NOUVEAU_DMA_H__
#define __NOUVEAU_DMA_H__

#ifndef NOUVEAU_DMA_DEBUG
#define NOUVEAU_DMA_DEBUG 0
#endif

/*
 * There is a hardware race condition where you cannot jump directly to
 * your PUT offset.  To avoid it, we jump to PUT + SKIPS instead and fill
 * the difference with NOPs.
 *
 * xf86-video-nv configures the DMA fetch size to 32 bytes and uses a
 * SKIPS value of 8.  Assuming the race is related to writes landing in
 * the fetch area, and since we configure a fetch size of 128 bytes, we
 * need a correspondingly larger SKIPS value.
 */
#define NOUVEAU_DMA_SKIPS (128 / 4)

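/*
 * Illustrative sketch only (not the authoritative init code, which lives
 * in the channel setup path): the skip area would be primed once with
 * NOPs when the push buffer is set up, roughly like
 *
 *	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
 *		OUT_RING(chan, 0);
 */
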
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
	NvSubM2MF	= 0,
	NvSub2D		= 1,
	NvSubCtxSurf2D  = 1,
	NvSubGdiRect    = 2,
	NvSubImageBlit  = 3
};

/* Object handles. */
enum {
	NvM2MF		= 0x80000001,
	NvDmaFB		= 0x80000002,
	NvDmaTT		= 0x80000003,
	NvDmaVRAM	= 0x80000004,
	NvDmaGART	= 0x80000005,
	NvNotify0       = 0x80000006,
	Nv2D		= 0x80000007,
	NvCtxSurf2D	= 0x80000008,
	NvRop		= 0x80000009,
	NvImagePatt	= 0x8000000a,
	NvClipRect	= 0x8000000b,
	NvGdiRect	= 0x8000000c,
	NvImageBlit	= 0x8000000d,

	/* G80+ display objects */
	NvEvoVRAM	= 0x01000000,
	NvEvoFB16	= 0x01000001,
	NvEvoFB32	= 0x01000002
};

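/*
 * An object handle is bound to a subchannel by writing it with method
 * 0x0000 on that subchannel; afterwards, methods sent on the subchannel
 * are handled by the bound object.  Minimal sketch, assuming the NvM2MF
 * object has already been created on the channel (error handling omitted):
 *
 *	ret = RING_SPACE(chan, 2);
 *	if (ret == 0) {
 *		BEGIN_RING(chan, NvSubM2MF, 0x0000, 1);
 *		OUT_RING  (chan, NvM2MF);
 *	}
 */
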
#define NV_MEMORY_TO_MEMORY_FORMAT                                    0x00000039
#define NV_MEMORY_TO_MEMORY_FORMAT_NAME                               0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF                            0x00000050
#define NV_MEMORY_TO_MEMORY_FORMAT_NOP                                0x00000100
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY                             0x00000104
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE                 0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN       0x00000001
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY                         0x00000180
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE                         0x00000184
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN                          0x0000030c

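/*
 * Minimal sketch of driving the M2MF object with the methods above
 * (illustrative only; assumes NvM2MF is bound to NvSubM2MF and that the
 * NvNotify0 notifier object exists, error handling omitted):
 *
 *	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
 *	OUT_RING  (chan, NvNotify0);
 *	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
 *	OUT_RING  (chan, NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE);
 *	FIRE_RING (chan);
 */
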
#define NV50_MEMORY_TO_MEMORY_FORMAT                                  0x00005039
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200                           0x00000200
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C                           0x0000021c
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH                   0x00000238
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH                  0x0000023c

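/*
 * Reserve room for 'size' words in the channel's push buffer, waiting for
 * the GPU to consume previously submitted commands if necessary.  Returns
 * 0 on success or a negative error code from nouveau_dma_wait().
 */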
static __must_check inline int
RING_SPACE(struct nouveau_channel *chan, int size)
{
	if (chan->dma.free < size) {
		int ret;

		ret = nouveau_dma_wait(chan, size);
		if (ret)
			return ret;
	}

	chan->dma.free -= size;
	return 0;
}

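/*
 * Emit a single 32-bit word at the current write position and advance it.
 * The word does not become visible to the GPU until FIRE_RING() updates
 * the channel's PUT pointer.
 */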
static inline void
OUT_RING(struct nouveau_channel *chan, int data)
{
	if (NOUVEAU_DMA_DEBUG) {
		NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
			chan->id, chan->dma.cur << 2, data);
	}

	nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
}

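/* Bulk variant of OUT_RING(): copies 'nr_dwords' words into the push buffer. */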
extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);

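/*
 * Emit a FIFO command header announcing that 'size' data words for method
 * 'mthd' on subchannel 'subc' follow.  The header packs the method into
 * bits 12:0, the subchannel into bits 15:13 and the word count into bits
 * 28:18; e.g. BEGIN_RING(chan, NvSubM2MF,
 * NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 2) emits 0x0008030c.
 */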
static inline void
BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
{
	OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
}

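/*
 * Update the channel's PUT register (as a byte offset into the push
 * buffer).  The memory barrier plus the dummy read-back of the push
 * buffer ensure all command writes are visible before PUT moves.
 */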
#define WRITE_PUT(val) do {                                                    \
	DRM_MEMORYBARRIER();                                                   \
	nouveau_bo_rd32(chan->pushbuf_bo, 0);                                  \
	nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base);  \
} while (0)

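/*
 * Make everything emitted since the last FIRE_RING() visible to the GPU by
 * advancing PUT to the current write position.  Does nothing if no new
 * words have been emitted.
 */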
static inline void
FIRE_RING(struct nouveau_channel *chan)
{
	if (NOUVEAU_DMA_DEBUG) {
		NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
			chan->id, chan->dma.cur << 2);
	}

	if (chan->dma.cur == chan->dma.put)
		return;
	chan->accel_done = true;

	WRITE_PUT(chan->dma.cur);
	chan->dma.put = chan->dma.cur;
}

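/*
 * Discard everything emitted since the last FIRE_RING() by rewinding the
 * write position back to PUT; used to back out of a partially built
 * command stream after an error.
 */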
static inline void
WIND_RING(struct nouveau_channel *chan)
{
	chan->dma.cur = chan->dma.put;
}

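/*
 * Minimal sketch of the typical submission pattern built from the helpers
 * above: reserve space, emit a header plus its data words, then fire.
 * Purely illustrative; the function name is made up and nothing in the
 * driver calls it.  It sends a single M2MF NOP, assuming NvM2MF is
 * already bound to NvSubM2MF.
 */
static inline int
nouveau_dma_example_nop(struct nouveau_channel *chan)
{
	int ret;

	/* One header word plus one data word. */
	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
	OUT_RING  (chan, 0);

	/* Point PUT at the new commands so the GPU fetches them. */
	FIRE_RING (chan);
	return 0;
}
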
#endif