/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "outp.h"
#include "conn.h"
#include "dp.h"
#include "ior.h"

#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>

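/* Bring the IOR<->outp routing programmed on HW (the "arm" state) in line
 * with the assignments requested by software (the "asy" state): first
 * release IORs whose armed output no longer matches their assigned output,
 * then program routes for newly assigned outputs.
 */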
static void
nvkm_outp_route(struct nvkm_disp *disp)
{
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	list_for_each_entry(ior, &disp->iors, head) {
		if ((outp = ior->arm.outp) && ior->arm.outp != ior->asy.outp) {
			OUTP_DBG(outp, "release %s", ior->name);
			if (ior->func->route.set)
				ior->func->route.set(outp, NULL);
			ior->arm.outp = NULL;
		}
	}

	list_for_each_entry(ior, &disp->iors, head) {
		if ((outp = ior->asy.outp)) {
			if (ior->asy.outp != ior->arm.outp) {
				OUTP_DBG(outp, "acquire %s", ior->name);
				if (ior->func->route.set)
					ior->func->route.set(outp, ior);
				ior->arm.outp = ior->asy.outp;
			}
		}
	}
}

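/* Translate a DCB output path into the IOR type and protocol needed to
 * drive it.  Returns UNKNOWN (and warns) for combinations not handled here.
 */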
static enum nvkm_ior_proto
nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type)
{
	switch (outp->info.location) {
	case 0:
		switch (outp->info.type) {
		case DCB_OUTPUT_ANALOG: *type = DAC; return  CRT;
		case DCB_OUTPUT_TV    : *type = DAC; return   TV;
		case DCB_OUTPUT_TMDS  : *type = SOR; return TMDS;
		case DCB_OUTPUT_LVDS  : *type = SOR; return LVDS;
		case DCB_OUTPUT_DP    : *type = SOR; return   DP;
		default:
			break;
		}
		break;
	case 1:
		switch (outp->info.type) {
		case DCB_OUTPUT_TMDS: *type = PIOR; return TMDS;
		case DCB_OUTPUT_DP  : *type = PIOR; return TMDS; /* not a bug */
		default:
			break;
		}
		break;
	default:
		break;
	}
	WARN_ON(1);
	return UNKNOWN;
}

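/* Drop one user's claim on the outp's OR; the OR itself is only detached
 * once no users remain.
 */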
void
nvkm_outp_release_or(struct nvkm_outp *outp, u8 user)
{
	struct nvkm_ior *ior = outp->ior;
	OUTP_TRACE(outp, "release %02x &= %02x %p", outp->acquired, ~user, ior);
	if (ior) {
		outp->acquired &= ~user;
		if (!outp->acquired) {
			outp->ior->asy.outp = NULL;
			outp->ior = NULL;
		}
	}
}

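/* Bind a specific IOR to this output path and record the user holding it. */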
int
nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior)
{
	outp->ior = ior;
	outp->ior->asy.outp = outp;
	outp->ior->asy.link = outp->info.sorconf.link;
	outp->acquired |= user;
	return 0;
}

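/* Helper for nvkm_outp_acquire_or(): pick an unassigned IOR of the requested
 * type whose HDA capability matches 'hda', preferring one that isn't
 * currently armed on HW over one that is.
 */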
static inline int
nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
		      u8 user, bool hda)
{
	struct nvkm_ior *ior;

	/* Failing that, a completely unused OR is the next best thing. */
	list_for_each_entry(ior, &outp->disp->iors, head) {
		if (!ior->identity && ior->hda == hda &&
		    !ior->asy.outp && ior->type == type && !ior->arm.outp &&
		    (ior->func->route.set || ior->id == __ffs(outp->info.or)))
			return nvkm_outp_acquire_ior(outp, user, ior);
	}

	/* Last resort is to assign an OR that's already active on HW,
	 * but will be released during the next modeset.
	 */
	list_for_each_entry(ior, &outp->disp->iors, head) {
		if (!ior->identity && ior->hda == hda &&
		    !ior->asy.outp && ior->type == type &&
		    (ior->func->route.set || ior->id == __ffs(outp->info.or)))
			return nvkm_outp_acquire_ior(outp, user, ior);
	}

	return -ENOSPC;
}

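/* Assign an output resource (OR) to this output path for the given user,
 * honouring identity mappings and trying to keep HDA-capable ORs free
 * unless HDA is actually needed.
 */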
int
nvkm_outp_acquire_or(struct nvkm_outp *outp, u8 user, bool hda)
{
	struct nvkm_ior *ior = outp->ior;
	enum nvkm_ior_proto proto;
	enum nvkm_ior_type type;

	OUTP_TRACE(outp, "acquire %02x |= %02x %p", outp->acquired, user, ior);
	if (ior) {
		outp->acquired |= user;
		return 0;
	}

	/* Lookup a compatible, and unused, OR to assign to the device. */
	proto = nvkm_outp_xlat(outp, &type);
	if (proto == UNKNOWN)
		return -ENOSYS;

	/* Deal with panels requiring identity-mapped SOR assignment. */
	if (outp->identity) {
		ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
		if (WARN_ON(!ior))
			return -ENOSPC;
		return nvkm_outp_acquire_ior(outp, user, ior);
	}

	/* First preference is to reuse the OR that is currently armed
	 * on HW, if any, in order to prevent unnecessary switching.
	 */
	list_for_each_entry(ior, &outp->disp->iors, head) {
		if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) {
			/*XXX: For various complicated reasons, we can't outright switch
			 *     the boot-time OR on the first modeset without some fairly
			 *     invasive changes.
			 *
			 *     The systems that were fixed by modifying the OR selection
			 *     code to account for HDA support shouldn't regress here as
			 *     the HDA-enabled ORs match the relevant output's pad macro
			 *     index, and the firmware seems to select an OR this way.
			 *
			 *     This warning is to make it obvious if that proves wrong.
			 */
			WARN_ON(hda && !ior->hda);
			return nvkm_outp_acquire_ior(outp, user, ior);
		}
	}

	/* If we don't need HDA, first try to acquire an OR that doesn't
	 * support it to leave free the ones that do.
	 */
	if (!hda) {
		if (!nvkm_outp_acquire_hda(outp, type, user, false))
			return 0;

		/* Use a HDA-supporting SOR anyway. */
		return nvkm_outp_acquire_hda(outp, type, user, true);
	}

	/* We want HDA, try to acquire an OR that supports it. */
	if (!nvkm_outp_acquire_hda(outp, type, user, true))
		return 0;

	/* There weren't any free ORs that support HDA, grab one that
	 * doesn't and at least allow display to work still.
	 */
	return nvkm_outp_acquire_hda(outp, type, user, false);
}

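/* Set the backlight level via the OR's 'bl' methods, temporarily taking the
 * OR with the internal (NVKM_OUTP_PRIV) user.
 */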
int
nvkm_outp_bl_set(struct nvkm_outp *outp, int level)
{
	int ret;

	ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false);
	if (ret)
		return ret;

	if (outp->ior->func->bl)
		ret = outp->ior->func->bl->set(outp->ior, level);
	else
		ret = -EINVAL;

	nvkm_outp_release_or(outp, NVKM_OUTP_PRIV);
	return ret;
}

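/* Query the current backlight level; mirrors nvkm_outp_bl_set(). */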
int
nvkm_outp_bl_get(struct nvkm_outp *outp)
{
	int ret;

	ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false);
	if (ret)
		return ret;

	if (outp->ior->func->bl)
		ret = outp->ior->func->bl->get(outp->ior);
	else
		ret = -EINVAL;

	nvkm_outp_release_or(outp, NVKM_OUTP_PRIV);
	return ret;
}

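/* Detect connector presence from the HPD GPIO described in the DCB, if any.
 * Returns 1 when HPD is asserted, 0 when it isn't, or a negative error code
 * (-EINVAL when no HPD GPIO is described).
 */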
int
nvkm_outp_detect(struct nvkm_outp *outp)
{
	struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
	int ret = -EINVAL;

	if (outp->conn->info.hpd != DCB_GPIO_UNUSED) {
		ret = nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, outp->conn->info.hpd);
		if (ret < 0)
			return ret;
		if (ret)
			return 1;

		/*TODO: Look into returning NOT_PRESENT if !HPD on DVI/HDMI.
		 *
		 *      It's uncertain whether this is accurate for all older chipsets,
		 *      so we're returning UNKNOWN, and the DRM will probe DDC instead.
		 */
		if (outp->info.type == DCB_OUTPUT_DP)
			return 0;
	}

	return ret;
}

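/* Release the OR held by the NVKM_OUTP_USER owner and reprogram routing. */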
void
nvkm_outp_release(struct nvkm_outp *outp)
{
	nvkm_outp_release_or(outp, NVKM_OUTP_USER);
	nvkm_outp_route(outp->disp);
}

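/* Acquire an OR on behalf of the NVKM_OUTP_USER owner and program routing. */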
int
nvkm_outp_acquire(struct nvkm_outp *outp, bool hda)
{
	int ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_USER, hda);

	if (ret)
		return ret;

	nvkm_outp_route(outp->disp);
	return 0;
}

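/* Work out which IOR, if any, this output path is currently wired to on HW,
 * so its boot-time state can be inherited.
 */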
struct nvkm_ior *
nvkm_outp_inherit(struct nvkm_outp *outp)
{
	struct nvkm_disp *disp = outp->disp;
	struct nvkm_ior *ior;
	enum nvkm_ior_proto proto;
	enum nvkm_ior_type type;
	int id, link;

	/* Find any OR from the class that is able to support this device. */
	proto = nvkm_outp_xlat(outp, &type);
	if (proto == UNKNOWN)
		return NULL;

	ior = nvkm_ior_find(disp, type, -1);
	if (WARN_ON(!ior))
		return NULL;

	/* Determine the specific OR, if any, this device is attached to. */
	if (ior->func->route.get) {
		id = ior->func->route.get(outp, &link);
		if (id < 0) {
			OUTP_DBG(outp, "no route");
			return NULL;
		}
	} else {
		/* Prior to DCB 4.1, this is hardwired like so. */
		id   = ffs(outp->info.or) - 1;
		link = (ior->type == SOR) ? outp->info.sorconf.link : 0;
	}

	ior = nvkm_ior_find(disp, type, id);
	if (WARN_ON(!ior))
		return NULL;

	return ior;
}

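/* Read back the inherited IOR's state and, if it's actively driving this
 * output path with the expected protocol, record the armed routing.
 */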
void
nvkm_outp_init(struct nvkm_outp *outp)
{
	enum nvkm_ior_proto proto;
	enum nvkm_ior_type type;
	struct nvkm_ior *ior;

	/* Find any OR from the class that is able to support this device. */
	proto = nvkm_outp_xlat(outp, &type);
	ior = outp->func->inherit(outp);
	if (!ior)
		return;

	/* Determine if the OR is already configured for this device. */
	ior->func->state(ior, &ior->arm);
	if (!ior->arm.head || ior->arm.proto != proto) {
		OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
			 ior->arm.proto, proto);

		/* The EFI GOP driver on Ampere can leave unused DP links routed,
		 * which we don't expect.  The DisableLT IED script *should* get
		 * us back to where we need to be.
		 */
		if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
			nvkm_dp_disable(outp, ior);

		return;
	}

	OUTP_DBG(outp, "on %s link %x", ior->name, ior->arm.link);
	ior->arm.outp = outp;
}

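/* Destroy an output path, calling the implementation's destructor if one
 * is provided.
 */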
void
nvkm_outp_del(struct nvkm_outp **poutp)
{
	struct nvkm_outp *outp = *poutp;
	if (outp && !WARN_ON(!outp->func)) {
		if (outp->func->dtor)
			*poutp = outp->func->dtor(outp);
		kfree(*poutp);
		*poutp = NULL;
	}
}

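/* Common constructor: allocate an output path, fill it from the DCB entry,
 * look up its I2C bus (when not running on top of GSP-RM), and reject paths
 * that can't be mapped to an output resource.
 */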
int
nvkm_outp_new_(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
	       int index, struct dcb_output *dcbE, struct nvkm_outp **poutp)
{
	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
	struct nvkm_outp *outp;
	enum nvkm_ior_proto proto;
	enum nvkm_ior_type type;

	if (!(outp = *poutp = kzalloc(sizeof(*outp), GFP_KERNEL)))
		return -ENOMEM;

	outp->func = func;
	outp->disp = disp;
	outp->index = index;
	outp->info = *dcbE;
	if (!disp->rm.client.gsp)
		outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);

	OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
		       "edid %x bus %d head %x",
		 outp->info.type, outp->info.location, outp->info.or,
		 outp->info.type >= 2 ? outp->info.sorconf.link : 0,
		 outp->info.connector, outp->info.i2c_index,
		 outp->info.bus, outp->info.heads);

	/* Cull output paths we can't map to an output resource. */
	proto = nvkm_outp_xlat(outp, &type);
	if (proto == UNKNOWN)
		return -ENODEV;

	return 0;
}

static const struct nvkm_outp_func
nvkm_outp = {
	.init = nvkm_outp_init,
	.detect = nvkm_outp_detect,
	.inherit = nvkm_outp_inherit,
	.acquire = nvkm_outp_acquire,
	.release = nvkm_outp_release,
	.bl.get = nvkm_outp_bl_get,
	.bl.set = nvkm_outp_bl_set,
};

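/* Constructor for output paths that need no type-specific behaviour. */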
int
nvkm_outp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
	      struct nvkm_outp **poutp)
{
	return nvkm_outp_new_(&nvkm_outp, disp, index, dcbE, poutp);
}