xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c (revision 55d0969c451159cff86949b38c39171cab962069)
1 /*
2  * Copyright 2021 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include "chan.h"
23 
24 #include <core/oproxy.h>
25 #include <core/ramht.h>
26 
27 #include <nvif/if0014.h>
28 
29 static int
30 nvkm_disp_chan_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **pevent)
31 {
32 	struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
33 	struct nvkm_disp *disp = chan->disp;
34 
35 	switch (type) {
36 	case 0:
37 		*pevent = &disp->uevent;
38 		return 0;
39 	default:
40 		break;
41 	}
42 
43 	return -EINVAL;
44 }
45 
46 static int
47 nvkm_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
48 		   enum nvkm_object_map *type, u64 *addr, u64 *size)
49 {
50 	struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
51 	struct nvkm_device *device = chan->disp->engine.subdev.device;
52 	const u64 base = device->func->resource_addr(device, 0);
53 
54 	*type = NVKM_OBJECT_MAP_IO;
55 	*addr = base + chan->func->user(chan, size);
56 	return 0;
57 }
58 
/* Proxy wrapper around an object created as a child of a display channel,
 * remembering the RAMHT hash returned by chan->func->bind() so the entry
 * can be removed again when the child is destroyed.
 */
struct nvkm_disp_chan_object {
	struct nvkm_oproxy oproxy;
	struct nvkm_disp *disp;
	int hash;	/* RAMHT slot from chan->func->bind() */
};
64 
/* oproxy destructor hook: drop the child's RAMHT binding that was
 * installed in nvkm_disp_chan_child_new().
 */
static void
nvkm_disp_chan_child_del_(struct nvkm_oproxy *base)
{
	struct nvkm_disp_chan_object *object = container_of(base, typeof(*object), oproxy);

	nvkm_ramht_remove(object->disp->ramht, object->hash);
}
72 
/* Only the first-stage dtor hook is needed; everything else is handled
 * by the generic oproxy code (see core/oproxy).
 */
static const struct nvkm_oproxy_func
nvkm_disp_chan_child_func_ = {
	.dtor[0] = nvkm_disp_chan_child_del_,
};
77 
/* Construct a child object (a DMA object, per nvkm_disp_chan_child_get())
 * inside a display channel, proxying it so its RAMHT binding can be
 * removed again on destruction.
 */
static int
nvkm_disp_chan_child_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
			 struct nvkm_object **pobject)
{
	struct nvkm_disp_chan *chan = nvkm_disp_chan(oclass->parent);
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_device *device = disp->engine.subdev.device;
	const struct nvkm_device_oclass *sclass = oclass->priv;	/* stashed by child_get() */
	struct nvkm_disp_chan_object *object;
	int ret;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nvkm_disp_chan_child_func_, oclass, &object->oproxy);
	object->disp = disp;
	/* Publish the proxy before anything can fail, so the caller tears
	 * down the partially-constructed object on the error paths below.
	 */
	*pobject = &object->oproxy.base;

	ret = sclass->ctor(device, oclass, argv, argc, &object->oproxy.object);
	if (ret)
		return ret;

	/* Bind the new object into the channel's hash table; a negative
	 * return is an error code, otherwise it's the RAMHT slot used by
	 * the dtor hook to unbind it later.
	 */
	object->hash = chan->func->bind(chan, object->oproxy.object, oclass->handle);
	if (object->hash < 0)
		return object->hash;

	return 0;
}
105 
106 static int
107 nvkm_disp_chan_child_get(struct nvkm_object *object, int index, struct nvkm_oclass *sclass)
108 {
109 	struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
110 	struct nvkm_device *device = chan->disp->engine.subdev.device;
111 	const struct nvkm_device_oclass *oclass = NULL;
112 
113 	if (chan->func->bind)
114 		sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ, 0);
115 	else
116 		sclass->engine = NULL;
117 
118 	if (sclass->engine && sclass->engine->func->base.sclass) {
119 		sclass->engine->func->base.sclass(sclass, index, &oclass);
120 		if (oclass) {
121 			sclass->ctor = nvkm_disp_chan_child_new;
122 			sclass->priv = oclass;
123 			return 0;
124 		}
125 	}
126 
127 	return -EINVAL;
128 }
129 
/* Halt the channel, then mask its interrupts; the suspend flag is
 * unused here.  Always succeeds.
 */
static int
nvkm_disp_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_disp_chan *chan = nvkm_disp_chan(object);

	chan->func->fini(chan);
	chan->func->intr(chan, false);
	return 0;
}
139 
/* Unmask the channel's interrupts before bringing it up, so no event
 * from init itself is missed.
 */
static int
nvkm_disp_chan_init(struct nvkm_object *object)
{
	struct nvkm_disp_chan *chan = nvkm_disp_chan(object);

	chan->func->intr(chan, true);
	return chan->func->init(chan);
}
148 
149 static void *
150 nvkm_disp_chan_dtor(struct nvkm_object *object)
151 {
152 	struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
153 	struct nvkm_disp *disp = chan->disp;
154 
155 	spin_lock(&disp->client.lock);
156 	if (disp->chan[chan->chid.user] == chan)
157 		disp->chan[chan->chid.user] = NULL;
158 	spin_unlock(&disp->client.lock);
159 
160 	nvkm_memory_unref(&chan->memory);
161 	return chan;
162 }
163 
/* Base nvkm_object vtable shared by every display channel variant. */
static const struct nvkm_object_func
nvkm_disp_chan = {
	.dtor = nvkm_disp_chan_dtor,
	.init = nvkm_disp_chan_init,
	.fini = nvkm_disp_chan_fini,
	.ntfy = nvkm_disp_chan_ntfy,
	.map = nvkm_disp_chan_map,
	.sclass = nvkm_disp_chan_child_get,
};
173 
/* Common constructor for all display channel types.
 *
 * Looks up the per-class channel description in disp->func->user[],
 * validates the nvif v0 arguments, allocates and initialises the
 * channel, then publishes it in disp->chan[].  @nr bounds the valid
 * range of args->v0.id for this channel type.  Returns 0 or a
 * negative error code; on failure after *pobject is set, the caller's
 * object teardown invokes nvkm_disp_chan_dtor() for cleanup.
 */
static int
nvkm_disp_chan_new_(struct nvkm_disp *disp, int nr, const struct nvkm_oclass *oclass,
		    void *argv, u32 argc, struct nvkm_object **pobject)
{
	const struct nvkm_disp_chan_user *user = NULL;
	struct nvkm_disp_chan *chan;
	union nvif_disp_chan_args *args = argv;
	int ret, i;

	/* Match the requested object class against the channel types this
	 * disp implementation exposes.
	 */
	for (i = 0; disp->func->user[i].ctor; i++) {
		if (disp->func->user[i].base.oclass == oclass->base.oclass) {
			user = disp->func->user[i].chan;
			break;
		}
	}

	if (WARN_ON(!user))
		return -EINVAL;

	if (argc != sizeof(args->v0) || args->v0.version != 0)
		return -ENOSYS;
	/* A pushbuf handle must be supplied iff this channel type has a
	 * push() hook (i.e. is a DMA channel rather than a PIO one).
	 */
	if (args->v0.id >= nr || !args->v0.pushbuf != !user->func->push)
		return -EINVAL;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	/* Publish before further setup so failures below are unwound by
	 * the caller destroying the object.
	 */
	*pobject = &chan->object;

	nvkm_object_ctor(&nvkm_disp_chan, oclass, &chan->object);
	chan->func = user->func;
	chan->mthd = user->mthd;
	chan->disp = disp;
	chan->chid.ctrl = user->ctrl + args->v0.id;
	chan->chid.user = user->user + args->v0.id;
	chan->head = args->v0.id;

	if (chan->func->push) {
		ret = chan->func->push(chan, args->v0.pushbuf);
		if (ret)
			return ret;
	}

	/* Claim our slot in the disp's channel table; only one instance
	 * of each channel id may exist at a time.
	 */
	spin_lock(&disp->client.lock);
	if (disp->chan[chan->chid.user]) {
		spin_unlock(&disp->client.lock);
		return -EBUSY;
	}
	disp->chan[chan->chid.user] = chan;
	spin_unlock(&disp->client.lock);
	return 0;
}
225 
226 int
227 nvkm_disp_wndw_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
228 		   struct nvkm_object **pobject)
229 {
230 	struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
231 
232 	return nvkm_disp_chan_new_(disp, disp->wndw.nr, oclass, argv, argc, pobject);
233 }
234 
235 int
236 nvkm_disp_chan_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
237 		   struct nvkm_object **pobject)
238 {
239 	struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
240 
241 	return nvkm_disp_chan_new_(disp, disp->head.nr, oclass, argv, argc, pobject);
242 }
243 
244 int
245 nvkm_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
246 		   struct nvkm_object **pobject)
247 {
248 	struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
249 
250 	return nvkm_disp_chan_new_(disp, 1, oclass, argv, argc, pobject);
251 }
252