/* drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c (xref revision ae22a94997b8a03dcb3c922857c203246711f9d4) */
1 /*
2  * Copyright 2013 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "priv.h"
25 #include "conn.h"
26 #include "head.h"
27 #include "ior.h"
28 #include "outp.h"
29 
30 #include <core/client.h>
31 #include <core/ramht.h>
32 
33 #include <nvif/class.h>
34 #include <nvif/cl0046.h>
35 #include <nvif/event.h>
36 #include <nvif/unpack.h>
37 
38 static void
39 nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int id)
40 {
41 	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
42 	struct nvkm_head *head = nvkm_head_find(disp, id);
43 	if (head)
44 		head->func->vblank_put(head);
45 }
46 
47 static void
48 nvkm_disp_vblank_init(struct nvkm_event *event, int type, int id)
49 {
50 	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
51 	struct nvkm_head *head = nvkm_head_find(disp, id);
52 	if (head)
53 		head->func->vblank_get(head);
54 }
55 
/* Ops for the per-head vblank event: init/fini arm and disarm the
 * hardware vblank interrupt for the head identified by the event id.
 */
static const struct nvkm_event_func
nvkm_disp_vblank_func = {
	.init = nvkm_disp_vblank_init,
	.fini = nvkm_disp_vblank_fini,
};
61 
/* Called from interrupt handlers to report a vblank on 'head'; notifies
 * any listeners registered on disp->vblank for that head.
 */
void
nvkm_disp_vblank(struct nvkm_disp *disp, int head)
{
	nvkm_event_ntfy(&disp->vblank, head, NVKM_DISP_HEAD_EVENT_VBLANK);
}
67 
/* Constructor trampoline for the disp root class: forwards to the
 * user-facing disp object constructor ('device' is unused here).
 */
static int
nvkm_disp_class_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	return nvkm_udisp_new(oclass, data, size, pobject);
}
75 
/* The single object class exposed by the disp engine (see
 * nvkm_disp_class_get, which returns it for index 0).
 */
static const struct nvkm_device_oclass
nvkm_disp_sclass = {
	.ctor = nvkm_disp_class_new,
};
80 
81 static int
82 nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
83 		    const struct nvkm_device_oclass **class)
84 {
85 	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
86 	if (index == 0) {
87 		oclass->base = disp->func->root;
88 		*class = &nvkm_disp_sclass;
89 		return 0;
90 	}
91 	return 1;
92 }
93 
/* Engine interrupt entry point: dispatch to the chip-specific handler. */
static void
nvkm_disp_intr(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	disp->func->intr(disp);
}
100 
/* Engine fini (shutdown/suspend): chip-specific teardown runs first,
 * then each output path is quiesced.  Order mirrors nvkm_disp_init in
 * reverse for the disp-level step.  Always returns 0.
 */
static int
nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_outp *outp;

	if (disp->func->fini)
		disp->func->fini(disp, suspend);

	list_for_each_entry(outp, &disp->outps, head) {
		if (outp->func->fini)
			outp->func->fini(outp);
	}

	return 0;
}
117 
118 static int
119 nvkm_disp_init(struct nvkm_engine *engine)
120 {
121 	struct nvkm_disp *disp = nvkm_disp(engine);
122 	struct nvkm_outp *outp;
123 	struct nvkm_ior *ior;
124 
125 	list_for_each_entry(outp, &disp->outps, head) {
126 		if (outp->func->init)
127 			outp->func->init(outp);
128 	}
129 
130 	if (disp->func->init) {
131 		int ret = disp->func->init(disp);
132 		if (ret)
133 			return ret;
134 	}
135 
136 	/* Set 'normal' (ie. when it's attached to a head) state for
137 	 * each output resource to 'fully enabled'.
138 	 */
139 	list_for_each_entry(ior, &disp->iors, head) {
140 		if (ior->func->power)
141 			ior->func->power(ior, true, true, true, true, true);
142 	}
143 
144 	return 0;
145 }
146 
147 static int
148 nvkm_disp_oneinit(struct nvkm_engine *engine)
149 {
150 	struct nvkm_disp *disp = nvkm_disp(engine);
151 	struct nvkm_subdev *subdev = &disp->engine.subdev;
152 	struct nvkm_head *head;
153 	int ret, i;
154 
155 	if (disp->func->oneinit) {
156 		ret = disp->func->oneinit(disp);
157 		if (ret)
158 			return ret;
159 	}
160 
161 	i = 0;
162 	list_for_each_entry(head, &disp->heads, head)
163 		i = max(i, head->id + 1);
164 
165 	return nvkm_event_init(&nvkm_disp_vblank_func, subdev, 1, i, &disp->vblank);
166 }
167 
/* Engine destructor.  Tears down everything nvkm_disp_new_ and
 * nvkm_disp_oneinit created, in reverse dependency order, and returns
 * the allocation for the caller to free.  Safe on a partially
 * constructed object: every step checks for presence, and disp->func
 * is NULL when the engine ctor failed (see nvkm_disp_new_).
 */
static void *
nvkm_disp_dtor(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;
	struct nvkm_head *head;
	void *data = disp;

	nvkm_ramht_del(&disp->ramht);
	nvkm_gpuobj_del(&disp->inst);

	nvkm_event_fini(&disp->uevent);

	/* Workqueue/mutex only exist if func->super was set at creation. */
	if (disp->super.wq) {
		destroy_workqueue(disp->super.wq);
		mutex_destroy(&disp->super.mutex);
	}

	nvkm_event_fini(&disp->vblank);

	/* Connectors and outputs are unlinked here before deletion. */
	while (!list_empty(&disp->conns)) {
		conn = list_first_entry(&disp->conns, typeof(*conn), head);
		list_del(&conn->head);
		nvkm_conn_del(&conn);
	}

	while (!list_empty(&disp->outps)) {
		outp = list_first_entry(&disp->outps, typeof(*outp), head);
		list_del(&outp->head);
		nvkm_outp_del(&outp);
	}

	/* NOTE(review): no list_del here — presumably nvkm_ior_del and
	 * nvkm_head_del unlink the entry themselves; verify in ior.c/head.c.
	 */
	while (!list_empty(&disp->iors)) {
		ior = list_first_entry(&disp->iors, typeof(*ior), head);
		nvkm_ior_del(&ior);
	}

	while (!list_empty(&disp->heads)) {
		head = list_first_entry(&disp->heads, typeof(*head), head);
		nvkm_head_del(&head);
	}

	/* Chip-specific teardown last; skipped if construction failed. */
	if (disp->func && disp->func->dtor)
		disp->func->dtor(disp);

	return data;
}
217 
/* Generic engine hooks for all disp implementations; chip-specific
 * behaviour is reached through disp->func from each hook.
 */
static const struct nvkm_engine_func
nvkm_disp = {
	.dtor = nvkm_disp_dtor,
	.oneinit = nvkm_disp_oneinit,
	.init = nvkm_disp_init,
	.fini = nvkm_disp_fini,
	.intr = nvkm_disp_intr,
	.base.sclass = nvkm_disp_class_get,
};
227 
/* Common constructor for disp engine implementations.
 *
 * Allocates *pdisp, wires up the chip-specific 'func' table, and
 * registers the engine with the device.  On any failure after the
 * allocation, ownership of *pdisp passes to the nvkm object machinery,
 * which will invoke nvkm_disp_dtor — so no explicit cleanup is done on
 * the error paths here.  Returns 0 on success or a negative errno.
 */
int
nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
	struct nvkm_disp *disp;
	int ret;

	if (!(disp = *pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
		return -ENOMEM;

	disp->func = func;
	INIT_LIST_HEAD(&disp->heads);
	INIT_LIST_HEAD(&disp->iors);
	INIT_LIST_HEAD(&disp->outps);
	INIT_LIST_HEAD(&disp->conns);
	spin_lock_init(&disp->client.lock);

	ret = nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine);
	if (ret) {
		/* Clear func so nvkm_disp_dtor skips the chip-specific
		 * dtor on this partially-constructed object.
		 */
		disp->func = NULL;
		return ret;
	}

	/* Supervisor interrupts are handled on an ordered workqueue,
	 * only created for implementations that provide a handler.
	 */
	if (func->super) {
		disp->super.wq = create_singlethread_workqueue("nvkm-disp");
		if (!disp->super.wq)
			return -ENOMEM;

		INIT_WORK(&disp->super.work, func->super);
		mutex_init(&disp->super.mutex);
	}

	/* One uevent slot per display channel. */
	return nvkm_event_init(func->uevent, &disp->engine.subdev, 1, ARRAY_SIZE(disp->chan),
			       &disp->uevent);
}
263