xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c (revision 8a922b7728a93d837954315c98b84f6b78de0c4f)
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "conn.h"
#include "dp.h"
#include "head.h"
#include "ior.h"
#include "outp.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>

#include <nvif/class.h>
#include <nvif/cl0046.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

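/* Per-head vblank event hooks: the nvkm_event core calls these when vblank
 * notification for a head is first requested (init) or no longer needed
 * (fini), and they forward to the head's vblank_get()/vblank_put() to
 * enable or disable vblank reporting for that head.
 */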
static void
nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int id)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_head *head = nvkm_head_find(disp, id);
	if (head)
		head->func->vblank_put(head);
}

static void
nvkm_disp_vblank_init(struct nvkm_event *event, int type, int id)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_head *head = nvkm_head_find(disp, id);
	if (head)
		head->func->vblank_get(head);
}

static const struct nvkm_event_func
nvkm_disp_vblank_func = {
	.init = nvkm_disp_vblank_init,
	.fini = nvkm_disp_vblank_fini,
};

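/* Called from the chipset interrupt handlers when a vblank occurs on the
 * given head; signals NVKM_DISP_HEAD_EVENT_VBLANK to any subscribed notifiers.
 */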
void
nvkm_disp_vblank(struct nvkm_disp *disp, int head)
{
	nvkm_event_ntfy(&disp->vblank, head, NVKM_DISP_HEAD_EVENT_VBLANK);
}

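/* Object class constructor: allocates the userspace-facing display object
 * (nvkm_udisp) when a client instantiates the display class.
 */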
static int
nvkm_disp_class_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	return nvkm_udisp_new(oclass, data, size, pobject);
}

static const struct nvkm_device_oclass
nvkm_disp_sclass = {
	.ctor = nvkm_disp_class_new,
};

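/* Object class enumeration: a single class is exposed, the chipset-specific
 * root display class from disp->func->root; any other index reports that
 * there is nothing further to enumerate.
 */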
static int
nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
		    const struct nvkm_device_oclass **class)
{
	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
	if (index == 0) {
		oclass->base = disp->func->root;
		*class = &nvkm_disp_sclass;
		return 0;
	}
	return 1;
}

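/* Engine interrupt entry point; forwards to the chipset-specific handler. */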
static void
nvkm_disp_intr(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	disp->func->intr(disp);
}

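/* Engine fini (unload/suspend): chipset-specific teardown first, then
 * quiesce every output path and connector.
 */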
static int
nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;

	if (disp->func->fini)
		disp->func->fini(disp);

	list_for_each_entry(outp, &disp->outps, head) {
		nvkm_outp_fini(outp);
	}

	list_for_each_entry(conn, &disp->conns, head) {
		nvkm_conn_fini(conn);
	}

	return 0;
}

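/* Engine init: bring connectors and output paths back up, run any
 * chipset-specific init, then power up the output resources (IORs).
 */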
static int
nvkm_disp_init(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	list_for_each_entry(conn, &disp->conns, head) {
		nvkm_conn_init(conn);
	}

	list_for_each_entry(outp, &disp->outps, head) {
		nvkm_outp_init(outp);
	}

	if (disp->func->init) {
		int ret = disp->func->init(disp);
		if (ret)
			return ret;
	}

	/* Set the 'normal' (i.e. attached to a head) state of each output
	 * resource to 'fully enabled'.
	 */
	list_for_each_entry(ior, &disp->iors, head) {
		ior->func->power(ior, true, true, true, true, true);
	}

	return 0;
}

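/* One-time setup: walk the VBIOS DCB table to create output path objects,
 * derive connector objects from them, run chipset-specific oneinit, pin
 * panel outputs to their SOR, and size the vblank event to the number of
 * heads.
 */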
static int
nvkm_disp_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_outp *outp, *outt, *pair;
	struct nvkm_conn *conn;
	struct nvkm_head *head;
	struct nvkm_ior *ior;
	struct nvbios_connE connE;
	struct dcb_output dcbE;
	u8  hpd = 0, ver, hdr;
	u32 data;
	int ret, i;

	/* Create output path objects for each VBIOS display path. */
	i = -1;
	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
		if (ver < 0x40) /* No support for chipsets prior to NV50. */
			break;
		if (dcbE.type == DCB_OUTPUT_UNUSED)
			continue;
		if (dcbE.type == DCB_OUTPUT_EOL)
			break;
		outp = NULL;

		switch (dcbE.type) {
		case DCB_OUTPUT_ANALOG:
		case DCB_OUTPUT_TV:
		case DCB_OUTPUT_TMDS:
		case DCB_OUTPUT_LVDS:
			ret = nvkm_outp_new(disp, i, &dcbE, &outp);
			break;
		case DCB_OUTPUT_DP:
			ret = nvkm_dp_new(disp, i, &dcbE, &outp);
			break;
		case DCB_OUTPUT_WFD:
			/* No support for WFD yet. */
			ret = -ENODEV;
			continue;
		default:
			nvkm_warn(subdev, "dcb %d type %d unknown\n",
				  i, dcbE.type);
			continue;
		}

		if (ret) {
			if (outp) {
				if (ret != -ENODEV)
					OUTP_ERR(outp, "ctor failed: %d", ret);
				else
					OUTP_DBG(outp, "not supported");
				nvkm_outp_del(&outp);
				continue;
			}
			nvkm_error(subdev, "failed to create outp %d\n", i);
			continue;
		}

		list_add_tail(&outp->head, &disp->outps);
		hpd = max(hpd, (u8)(dcbE.connector + 1));
	}

	/* Create connector objects based on available output paths. */
	list_for_each_entry_safe(outp, outt, &disp->outps, head) {
		/* VBIOS data *should* give us the most useful information. */
		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
				     &connE);

		/* No VBIOS connector data... */
		if (!data) {
			/* Heuristic: anything with the same ccb index is
			 * considered to be on the same connector; any
			 * output path without an associated ccb entry is
			 * put on its own connector.
			 */
			int ccb_index = outp->info.i2c_index;
			if (ccb_index != 0xf) {
				list_for_each_entry(pair, &disp->outps, head) {
					if (pair->info.i2c_index == ccb_index) {
						outp->conn = pair->conn;
						break;
					}
				}
			}

			/* Connector shared with another output path. */
			if (outp->conn)
				continue;

			memset(&connE, 0x00, sizeof(connE));
			connE.type = DCB_CONNECTOR_NONE;
			i = -1;
		} else {
			i = outp->info.connector;
		}

		/* Check that we haven't already created this connector. */
		list_for_each_entry(conn, &disp->conns, head) {
			if (conn->index == outp->info.connector) {
				outp->conn = conn;
				break;
			}
		}

		if (outp->conn)
			continue;

		/* Apparently we need to create a new one! */
		ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
		if (ret) {
			nvkm_error(subdev, "failed to create outp %d conn: %d\n",
				   outp->index, ret);
			nvkm_conn_del(&outp->conn);
			list_del(&outp->head);
			nvkm_outp_del(&outp);
			continue;
		}

		list_add_tail(&outp->conn->head, &disp->conns);
	}

	if (disp->func->oneinit) {
		ret = disp->func->oneinit(disp);
		if (ret)
			return ret;
	}

	/* Enforce identity-mapped SOR assignment for panels, which have
	 * certain bits (i.e. backlight controls) wired to a specific SOR.
	 */
	list_for_each_entry(outp, &disp->outps, head) {
		if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
		    outp->conn->info.type == DCB_CONNECTOR_eDP) {
			ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
			if (!WARN_ON(!ior))
				ior->identity = true;
			outp->identity = true;
		}
	}

	i = 0;
	list_for_each_entry(head, &disp->heads, head)
		i = max(i, head->id + 1);

	return nvkm_event_init(&nvkm_disp_vblank_func, subdev, 1, i, &disp->vblank);
}

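/* Destructor: drop the hash table and instance memory, tear down the user
 * and vblank events and the supervisor workqueue, then free every
 * connector, output path, IOR and head object.
 */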
static void *
nvkm_disp_dtor(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;
	struct nvkm_head *head;
	void *data = disp;

	nvkm_ramht_del(&disp->ramht);
	nvkm_gpuobj_del(&disp->inst);

	nvkm_event_fini(&disp->uevent);

	if (disp->super.wq) {
		destroy_workqueue(disp->super.wq);
		mutex_destroy(&disp->super.mutex);
	}

	nvkm_event_fini(&disp->vblank);

	while (!list_empty(&disp->conns)) {
		conn = list_first_entry(&disp->conns, typeof(*conn), head);
		list_del(&conn->head);
		nvkm_conn_del(&conn);
	}

	while (!list_empty(&disp->outps)) {
		outp = list_first_entry(&disp->outps, typeof(*outp), head);
		list_del(&outp->head);
		nvkm_outp_del(&outp);
	}

	while (!list_empty(&disp->iors)) {
		ior = list_first_entry(&disp->iors, typeof(*ior), head);
		nvkm_ior_del(&ior);
	}

	while (!list_empty(&disp->heads)) {
		head = list_first_entry(&disp->heads, typeof(*head), head);
		nvkm_head_del(&head);
	}

	return data;
}

static const struct nvkm_engine_func
nvkm_disp = {
	.dtor = nvkm_disp_dtor,
	.oneinit = nvkm_disp_oneinit,
	.init = nvkm_disp_init,
	.fini = nvkm_disp_fini,
	.intr = nvkm_disp_intr,
	.base.sclass = nvkm_disp_class_get,
};

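/* Common constructor used by the chipset-specific implementations: allocate
 * the nvkm_disp, register it with the core as an engine, optionally create
 * the single-threaded supervisor workqueue, and initialise the user event
 * sized to the number of display channels (ARRAY_SIZE(disp->chan)).
 */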
int
nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
	struct nvkm_disp *disp;
	int ret;

	if (!(disp = *pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
		return -ENOMEM;

	disp->func = func;
	INIT_LIST_HEAD(&disp->heads);
	INIT_LIST_HEAD(&disp->iors);
	INIT_LIST_HEAD(&disp->outps);
	INIT_LIST_HEAD(&disp->conns);
	spin_lock_init(&disp->client.lock);

	ret = nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine);
	if (ret)
		return ret;

	if (func->super) {
		disp->super.wq = create_singlethread_workqueue("nvkm-disp");
		if (!disp->super.wq)
			return -ENOMEM;

		INIT_WORK(&disp->super.work, func->super);
		mutex_init(&disp->super.mutex);
	}

	return nvkm_event_init(func->uevent, &disp->engine.subdev, 1, ARRAY_SIZE(disp->chan),
			       &disp->uevent);
}
395