xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c (revision fd7d598270724cc787982ea48bbe17ad383a8b7f)
/*
 * Copyright 2016 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"

#include <subdev/timer.h>

#include <nvif/class.h>

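/*
 * Channel startup for the GP102 display DMA channels.  The register-level
 * meaning of the writes below is not documented; what is visible from the
 * code is that the channel's push buffer and saved PUT pointer are
 * programmed, the channel is enabled, and we then poll for up to two
 * seconds for the busy bit (0x80000000) to clear before the channel is
 * considered initialised.
 */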
static int
gp102_disp_dmac_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
	nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
	nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
	nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init: %08x\n", user,
			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
		return -EBUSY;
	}

	return 0;
}

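/*
 * GP102 only needs its own channel init; push-buffer submission, teardown,
 * interrupt handling, user-area mapping and context binding are inherited
 * from the NV50/GF119 implementations.
 */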
const struct nvkm_disp_chan_func
gp102_disp_dmac_func = {
	.push = nv50_disp_dmac_push,
	.init = gp102_disp_dmac_init,
	.fini = gf119_disp_dmac_fini,
	.intr = gf119_disp_chan_intr,
	.user = nv50_disp_chan_user,
	.bind = gf119_disp_dmac_bind,
};

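/*
 * Cursor and overlay-immediate PIO channels.  On GP102 the user-area
 * indices (17 and 13) no longer match the control indices (13 and 9),
 * which appears to be why these definitions exist separately from the
 * GF119-era ones they otherwise mirror.
 */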
static const struct nvkm_disp_chan_user
gp102_disp_curs = {
	.func = &gf119_disp_pioc_func,
	.ctrl = 13,
	.user = 17,
};

static const struct nvkm_disp_chan_user
gp102_disp_oimm = {
	.func = &gf119_disp_pioc_func,
	.ctrl = 9,
	.user = 13,
};

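/*
 * The overlay and base DMA channels reuse the GK104/GF119 method lists but
 * are bound to the GP102 DMAC hooks above.
 */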
static const struct nvkm_disp_chan_user
gp102_disp_ovly = {
	.func = &gp102_disp_dmac_func,
	.ctrl = 5,
	.user = 5,
	.mthd = &gk104_disp_ovly_mthd,
};

static const struct nvkm_disp_chan_user
gp102_disp_base = {
	.func = &gp102_disp_dmac_func,
	.ctrl = 1,
	.user = 1,
	.mthd = &gf119_disp_base_mthd,
};

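/*
 * Core channel init: the same sequence as gp102_disp_dmac_init, but with
 * fixed register offsets (the core channel is channel 0) and a different
 * value in the final enable write (0x01000013 rather than 0x00000013).
 */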
static int
gp102_disp_core_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x611494, chan->push);
	nvkm_wr32(device, 0x611498, 0x00010000);
	nvkm_wr32(device, 0x61149c, 0x00000001);
	nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000, chan->suspend_put);
	nvkm_wr32(device, 0x610490, 0x01000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "core init: %08x\n",
			   nvkm_rd32(device, 0x610490));
		return -EBUSY;
	}

	return 0;
}

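/* Core channel hooks and class; only the init step is GP102-specific. */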
static const struct nvkm_disp_chan_func
gp102_disp_core_func = {
	.push = nv50_disp_dmac_push,
	.init = gp102_disp_core_init,
	.fini = gf119_disp_core_fini,
	.intr = gf119_disp_chan_intr,
	.user = nv50_disp_chan_user,
	.bind = gf119_disp_dmac_bind,
};

static const struct nvkm_disp_chan_user
gp102_disp_core = {
	.func = &gp102_disp_core_func,
	.ctrl = 0,
	.user = 0,
	.mthd = &gk104_disp_core_mthd,
};

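/*
 * Channel exception handler: read back the faulting method, its data word
 * and a third, unidentified word, log them, and for method 0x0080 (which
 * appears to be the EVO UPDATE method) dump the channel's full method state
 * for debugging.
 */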
static void
gp102_disp_intr_error(struct nvkm_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12));
	u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12));
	u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12));

	nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
		   chid, (mthd & 0x0000ffc), data, mthd, unkn);

	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd & 0xffc) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

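	/*
	 * Ack the error interrupt for this channel and, presumably, reset
	 * its method error state so processing can continue.
	 */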
	nvkm_wr32(device, 0x61009c, (1 << chid));
	nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000);
}

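/*
 * Top-level engine description for GP102-era display: NV50 one-time setup,
 * GF119 init/fini, interrupt and supervisor handling, Pascal (GP100) SORs,
 * and the channel classes defined above exposed under the GP102_DISP root
 * class.
 */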
static const struct nvkm_disp_func
gp102_disp = {
	.oneinit = nv50_disp_oneinit,
	.init = gf119_disp_init,
	.fini = gf119_disp_fini,
	.intr = gf119_disp_intr,
	.intr_error = gp102_disp_intr_error,
	.super = gf119_disp_super,
	.uevent = &gf119_disp_chan_uevent,
	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
	.sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
	.root = { 0,0,GP102_DISP },
	.user = {
		{{0,0,GK104_DISP_CURSOR             }, nvkm_disp_chan_new, &gp102_disp_curs },
		{{0,0,GK104_DISP_OVERLAY            }, nvkm_disp_chan_new, &gp102_disp_oimm },
		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, nvkm_disp_chan_new, &gp102_disp_base },
		{{0,0,GP102_DISP_CORE_CHANNEL_DMA   }, nvkm_disp_core_new, &gp102_disp_core },
		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, nvkm_disp_chan_new, &gp102_disp_ovly },
		{}
	},
};

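/*
 * Constructor hooked up by the per-chipset device tables for boards with a
 * GP102-style display engine.
 */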
int
gp102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_disp **pdisp)
{
	return nvkm_disp_new_(&gp102_disp, device, type, inst, pdisp);
}