/* xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c (revision 9dbbc3b9d09d6deba9f3b9e1d5b355032ed46a75) */
/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "dp.h"
#include "conn.h"
#include "head.h"
#include "ior.h"

#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>

#include <nvif/event.h>

/* IED scripts are no longer used by UEFI/RM from Ampere, but have been updated for
 * the x86 option ROM.  However, the relevant VBIOS table versions weren't modified,
 * so we're unable to detect this in a nice way.
 */
#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)

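/* Per-attempt link training state: a snapshot of the sink's DPCD link
 * status/adjust-request bytes (stat), the per-lane drive settings to be
 * written back (conf), and the optional post-cursor2 status/config used
 * when the sink supports it (pc2*).
 */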
struct lt_state {
	struct nvkm_dp *dp;
	u8  stat[6];
	u8  conf[4];
	bool pc2;
	u8  pc2stat;
	u8  pc2conf[2];
};

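/* Wait out the sink's TRAINING_AUX_RD_INTERVAL (or the caller-supplied
 * minimum delay when that DPCD field is zero), then read back the link
 * status/adjust-request bytes, plus post-cursor2 status if requested.
 */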
static int
nvkm_dp_train_sense(struct lt_state *lt, bool pc, u32 delay)
{
	struct nvkm_dp *dp = lt->dp;
	int ret;

	if (dp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL])
		mdelay(dp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL] * 4);
	else
		udelay(delay);

	ret = nvkm_rdaux(dp->aux, DPCD_LS02, lt->stat, 6);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_rdaux(dp->aux, DPCD_LS0C, &lt->pc2stat, 1);
		if (ret)
			lt->pc2stat = 0x00;
		OUTP_TRACE(&dp->outp, "status %6ph pc2 %02x",
			   lt->stat, lt->pc2stat);
	} else {
		OUTP_TRACE(&dp->outp, "status %6ph", lt->stat);
	}

	return 0;
}

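/* For each active lane, take the voltage swing / pre-emphasis (and, when
 * supported, post-cursor2) levels requested by the sink, clamp them at the
 * maximums and flag *_REACHED as appropriate, program the OR's lane drive
 * parameters from the matching VBIOS DP config entry, and write the
 * resulting TRAINING_LANEx_SET values back to the sink.
 */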
static int
nvkm_dp_train_drive(struct lt_state *lt, bool pc)
{
	struct nvkm_dp *dp = lt->dp;
	struct nvkm_ior *ior = dp->outp.ior;
	struct nvkm_bios *bios = ior->disp->engine.subdev.device->bios;
	struct nvbios_dpout info;
	struct nvbios_dpcfg ocfg;
	u8  ver, hdr, cnt, len;
	u32 data;
	int ret, i;

	for (i = 0; i < ior->dp.nr; i++) {
		u8 lane = (lt->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
		u8 lpc2 = (lt->pc2stat >> (i * 2)) & 0x3;
		u8 lpre = (lane & 0x0c) >> 2;
		u8 lvsw = (lane & 0x03) >> 0;
		u8 hivs = 3 - lpre;
		u8 hipe = 3;
		u8 hipc = 3;

		if (lpc2 >= hipc)
			lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
		if (lpre >= hipe) {
			/* Once shifted <<3 into conf[] below, this bit lands
			 * on MAX_PRE_EMPHASIS_REACHED.
			 */
			lpre = hipe | DPCD_LC03_MAX_SWING_REACHED;
			lvsw = hivs = 3 - (lpre & 3);
		} else
		if (lvsw >= hivs) {
			lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
		}

		lt->conf[i] = (lpre << 3) | lvsw;
		lt->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);

		OUTP_TRACE(&dp->outp, "config lane %d %02x %02x",
			   i, lt->conf[i], lpc2);

		data = nvbios_dpout_match(bios, dp->outp.info.hasht,
					  dp->outp.info.hashm,
					  &ver, &hdr, &cnt, &len, &info);
		if (!data)
			continue;

		data = nvbios_dpcfg_match(bios, data, lpc2 & 3, lvsw & 3,
					  lpre & 3, &ver, &hdr, &cnt, &len,
					  &ocfg);
		if (!data)
			continue;

		ior->func->dp.drive(ior, i, ocfg.pc, ocfg.dc,
					    ocfg.pe, ocfg.tx_pu);
	}

	ret = nvkm_wraux(dp->aux, DPCD_LC03(0), lt->conf, 4);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_wraux(dp->aux, DPCD_LC0F, lt->pc2conf, 2);
		if (ret)
			return ret;
	}

	return 0;
}

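/* Select a training pattern (0 = training disabled) on both ends of the
 * link: the source OR first, then the sink via TRAINING_PATTERN_SET.
 */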
static void
nvkm_dp_train_pattern(struct lt_state *lt, u8 pattern)
{
	struct nvkm_dp *dp = lt->dp;
	u8 sink_tp;

	OUTP_TRACE(&dp->outp, "training pattern %d", pattern);
	dp->outp.ior->func->dp.pattern(dp->outp.ior, pattern);

	nvkm_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
	sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
	sink_tp |= pattern;
	nvkm_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
}

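/* Channel equalisation phase: train with TPS3 when the sink supports it
 * (TPS2 otherwise), polling until every lane reports CHANNEL_EQ_DONE and
 * SYMBOL_LOCKED with inter-lane alignment, and giving up after a handful
 * of drive adjustments or if clock recovery is lost.
 */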
static int
nvkm_dp_train_eq(struct lt_state *lt)
{
	bool eq_done = false, cr_done = true;
	int tries = 0, i;

	if (lt->dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
		nvkm_dp_train_pattern(lt, 3);
	else
		nvkm_dp_train_pattern(lt, 2);

	do {
		if ((tries &&
		    nvkm_dp_train_drive(lt, lt->pc2)) ||
		    nvkm_dp_train_sense(lt, lt->pc2, 400))
			break;

		eq_done = !!(lt->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
		for (i = 0; i < lt->dp->outp.ior->dp.nr && eq_done; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE))
				cr_done = false;
			if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
				eq_done = false;
		}
	} while (!eq_done && cr_done && ++tries <= 5);

	return eq_done ? 0 : -1;
}

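/* Clock recovery phase: train with TPS1 and poll until every lane reports
 * CR_DONE, bailing out if a lane fails while already at maximum voltage
 * swing.  The retry counter is reset whenever the requested swing changes.
 */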
static int
nvkm_dp_train_cr(struct lt_state *lt)
{
	bool cr_done = false, abort = false;
	int voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
	int tries = 0, i;

	nvkm_dp_train_pattern(lt, 1);

	do {
		if (nvkm_dp_train_drive(lt, false) ||
		    nvkm_dp_train_sense(lt, false, 100))
			break;

		cr_done = true;
		for (i = 0; i < lt->dp->outp.ior->dp.nr; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
				cr_done = false;
				if (lt->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
					abort = true;
				break;
			}
		}

		if ((lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
			voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
			tries = 0;
		}
	} while (!cr_done && !abort && ++tries < 5);

	return cr_done ? 0 : -1;
}

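/* Attempt link training at the currently selected lane count/bandwidth:
 * run the relevant DP Info table scripts, program the link configuration
 * on both the source OR and the sink (LINK_BW_SET/LANE_COUNT_SET), then
 * perform the clock recovery and channel equalisation phases.
 */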
static int
nvkm_dp_train_links(struct nvkm_dp *dp)
{
	struct nvkm_ior *ior = dp->outp.ior;
	struct nvkm_disp *disp = dp->outp.disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct lt_state lt = {
		.dp = dp,
	};
	u32 lnkcmp;
	u8 sink[2];
	int ret;

	OUTP_DBG(&dp->outp, "training %d x %d MB/s",
		 ior->dp.nr, ior->dp.bw * 27);

	/* Intersect misc. capabilities of the OR and sink. */
	if (disp->engine.subdev.device->chipset < 0xd0)
		dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
	lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;

	if (AMPERE_IED_HACK(disp) && (lnkcmp = lt.dp->info.script[0])) {
		/* Execute BeforeLinkTraining script from DP Info table. */
		while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
			lnkcmp += 3;
		lnkcmp = nvbios_rd16(bios, lnkcmp + 1);

		nvbios_init(&dp->outp.disp->engine.subdev, lnkcmp,
			init.outp = &dp->outp.info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	/* Set desired link configuration on the source. */
	if ((lnkcmp = lt.dp->info.lnkcmp)) {
		if (dp->version < 0x30) {
			while ((ior->dp.bw * 2700) < nvbios_rd16(bios, lnkcmp))
				lnkcmp += 4;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 2);
		} else {
			while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
				lnkcmp += 3;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
		}

		nvbios_init(subdev, lnkcmp,
			init.outp = &dp->outp.info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	ret = ior->func->dp.links(ior, dp->aux);
	if (ret) {
		if (ret < 0) {
			OUTP_ERR(&dp->outp, "train failed with %d", ret);
			return ret;
		}
		return 0;
	}

	ior->func->dp.power(ior, ior->dp.nr);

	/* Set desired link configuration on the sink. */
	sink[0] = ior->dp.bw;
	sink[1] = ior->dp.nr;
	if (ior->dp.ef)
		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;

	ret = nvkm_wraux(dp->aux, DPCD_LC00_LINK_BW_SET, sink, 2);
	if (ret)
		return ret;

	/* Attempt to train the link in this configuration. */
	memset(lt.stat, 0x00, sizeof(lt.stat));
	ret = nvkm_dp_train_cr(&lt);
	if (ret == 0)
		ret = nvkm_dp_train_eq(&lt);
	nvkm_dp_train_pattern(&lt, 0);
	return ret;
}

static void
nvkm_dp_train_fini(struct nvkm_dp *dp)
{
	/* Execute AfterLinkTraining script from DP Info table. */
	nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[1],
		init.outp = &dp->outp.info;
		init.or   = dp->outp.ior->id;
		init.link = dp->outp.ior->asy.link;
	);
}

static void
nvkm_dp_train_init(struct nvkm_dp *dp)
{
	/* Execute EnableSpread/DisableSpread script from DP Info table. */
	if (dp->dpcd[DPCD_RC03] & DPCD_RC03_MAX_DOWNSPREAD) {
		nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[2],
			init.outp = &dp->outp.info;
			init.or   = dp->outp.ior->id;
			init.link = dp->outp.ior->asy.link;
		);
	} else {
		nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[3],
			init.outp = &dp->outp.info;
			init.or   = dp->outp.ior->id;
			init.link = dp->outp.ior->asy.link;
		);
	}

	if (!AMPERE_IED_HACK(dp->outp.disp)) {
		/* Execute BeforeLinkTraining script from DP Info table. */
		nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
			init.outp = &dp->outp.info;
			init.or   = dp->outp.ior->id;
			init.link = dp->outp.ior->asy.link;
		);
	}
}

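/* Link configurations ordered from highest to lowest total bandwidth.
 * 'bw' is the per-lane link rate in units of 27 MB/s of payload
 * (0x06/0x0a/0x14 = RBR/HBR/HBR2), so the usable rate is
 * bw * 27000 * nr KB/s, which is what the 'rate' column holds.
 * For example, four lanes of HBR2 give 0x14 * 27000 * 4 = 2160000 KB/s.
 */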
static const struct dp_rates {
	u32 rate;
	u8  bw;
	u8  nr;
} nvkm_dp_rates[] = {
	{ 2160000, 0x14, 4 },
	{ 1080000, 0x0a, 4 },
	{ 1080000, 0x14, 2 },
	{  648000, 0x06, 4 },
	{  540000, 0x0a, 2 },
	{  540000, 0x14, 1 },
	{  324000, 0x06, 2 },
	{  270000, 0x0a, 1 },
	{  162000, 0x06, 1 },
	{}
};

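/* Train the link for a payload of 'dataKBps'.  The OR's (and, where
 * possible, the sink's) capabilities pick a lowest acceptable "failsafe"
 * configuration, then configurations are tried from fastest to slowest
 * until training succeeds or the failsafe entry has been attempted.
 */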
static int
nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
{
	struct nvkm_ior *ior = dp->outp.ior;
	const u8 sink_nr = dp->dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
	const u8 sink_bw = dp->dpcd[DPCD_RC01_MAX_LINK_RATE];
	const u8 outp_nr = dp->outp.info.dpconf.link_nr;
	const u8 outp_bw = dp->outp.info.dpconf.link_bw;
	const struct dp_rates *failsafe = NULL, *cfg;
	int ret = -EINVAL;
	u8  pwr;

	/* Find the lowest configuration of the OR that can support
	 * the required link rate.
	 *
	 * We will refuse to program the OR to lower rates, even if
	 * link training fails at higher rates (or even if the sink
	 * can't support the rate at all, though the DD is supposed
	 * to prevent such situations from happening).
	 *
	 * Attempting to do so can cause the entire display to hang,
	 * and it's better to have a failed modeset than that.
	 */
	for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
		if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) {
			/* Try to respect sink limits too when selecting
			 * lowest link configuration.
			 */
			if (!failsafe ||
			    (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
				failsafe = cfg;
		}

		if (failsafe && cfg[1].rate < dataKBps)
			break;
	}

	if (WARN_ON(!failsafe))
		return ret;

	/* Ensure sink is not in a low-power state. */
	if (!nvkm_rdaux(dp->aux, DPCD_SC00, &pwr, 1)) {
		if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
			pwr &= ~DPCD_SC00_SET_POWER;
			pwr |=  DPCD_SC00_SET_POWER_D0;
			nvkm_wraux(dp->aux, DPCD_SC00, &pwr, 1);
		}
	}

	/* Link training. */
	OUTP_DBG(&dp->outp, "training (min: %d x %d MB/s)",
		 failsafe->nr, failsafe->bw * 27);
	nvkm_dp_train_init(dp);
	for (cfg = nvkm_dp_rates; ret < 0 && cfg <= failsafe; cfg++) {
		/* Skip configurations not supported by both OR and sink. */
		if ((cfg->nr > outp_nr || cfg->bw > outp_bw ||
		     cfg->nr > sink_nr || cfg->bw > sink_bw)) {
			if (cfg != failsafe)
				continue;
			OUTP_ERR(&dp->outp, "link rate unsupported by sink");
		}
		ior->dp.mst = dp->lt.mst;
		ior->dp.ef = dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
		ior->dp.bw = cfg->bw;
		ior->dp.nr = cfg->nr;

		/* Program selected link configuration. */
		ret = nvkm_dp_train_links(dp);
	}
	nvkm_dp_train_fini(dp);
	if (ret < 0)
		OUTP_ERR(&dp->outp, "training failed");
	else
		OUTP_DBG(&dp->outp, "training done");
	atomic_set(&dp->lt.done, 1);
	return ret;
}

static void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
	struct nvkm_dp *dp = nvkm_dp(outp);

	/* Execute DisableLT script from DP Info Table. */
	nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
		init.outp = &dp->outp.info;
		init.or   = ior->id;
		init.link = ior->arm.link;
	);
}

static void
nvkm_dp_release(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);

	/* Prevent link from being retrained if sink sends an IRQ. */
	atomic_set(&dp->lt.done, 0);
	dp->outp.ior->dp.nr = 0;
}

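/* Validate the current link against what the attached head(s) need and
 * against the sink's reported link status, retraining if either check
 * fails (also reached from the HPD IRQ path once a link has trained).
 *
 * E.g. a 148500 kHz pixel clock at 24 bpp needs 148500 * 24 / 8 =
 * 445500 KB/s of link bandwidth, which two HBR lanes (2 * 270000 KB/s)
 * comfortably cover.
 */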
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);
	struct nvkm_ior *ior = dp->outp.ior;
	struct nvkm_head *head;
	bool retrain = true;
	u32 datakbps = 0;
	u32 dataKBps;
	u32 linkKBps;
	u8  stat[3];
	int ret, i;

	mutex_lock(&dp->mutex);

	/* Check that link configuration meets current requirements. */
	list_for_each_entry(head, &outp->disp->head, head) {
		if (ior->asy.head & (1 << head->id)) {
			u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
			datakbps += khz * head->asy.or.depth;
		}
	}

	linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
	dataKBps = DIV_ROUND_UP(datakbps, 8);
	OUTP_DBG(&dp->outp, "data %d KB/s link %d KB/s mst %d->%d",
		 dataKBps, linkKBps, ior->dp.mst, dp->lt.mst);
	if (linkKBps < dataKBps || ior->dp.mst != dp->lt.mst) {
		OUTP_DBG(&dp->outp, "link requirements changed");
		goto done;
	}

	/* Check that link is still trained. */
	ret = nvkm_rdaux(dp->aux, DPCD_LS02, stat, 3);
	if (ret) {
		OUTP_DBG(&dp->outp,
			 "failed to read link status, assuming no sink");
		goto done;
	}

	if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
		for (i = 0; i < ior->dp.nr; i++) {
			u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
				OUTP_DBG(&dp->outp,
					 "lane %d not equalised", i);
				goto done;
			}
		}
		retrain = false;
	} else {
		OUTP_DBG(&dp->outp, "no inter-lane alignment");
	}

done:
	if (retrain || !atomic_read(&dp->lt.done))
		ret = nvkm_dp_train(dp, dataKBps);
	mutex_unlock(&dp->mutex);
	return ret;
}

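/* Switch AUX power between "always on" and "on demand" depending on
 * whether we believe a sink is present, and (re)read the sink's DPCD
 * receiver capabilities while enabled.  Returns true if the DPCD read
 * succeeded, i.e. a sink responded on the AUX channel.
 */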
static bool
nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
{
	struct nvkm_i2c_aux *aux = dp->aux;

	if (enable) {
		if (!dp->present) {
			OUTP_DBG(&dp->outp, "aux power -> always");
			nvkm_i2c_aux_monitor(aux, true);
			dp->present = true;
		}

		if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
				sizeof(dp->dpcd)))
			return true;
	}

	if (dp->present) {
		OUTP_DBG(&dp->outp, "aux power -> demand");
		nvkm_i2c_aux_monitor(aux, false);
		dp->present = false;
	}

	atomic_set(&dp->lt.done, 0);
	return false;
}

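/* AUX event notifier: on an IRQ (HPD short pulse) re-validate the link if
 * it was previously trained, otherwise re-probe the sink's DPCD, and in
 * all cases forward the plug/unplug/IRQ state to the connector event
 * queue.
 */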
static int
nvkm_dp_hpd(struct nvkm_notify *notify)
{
	const struct nvkm_i2c_ntfy_rep *line = notify->data;
	struct nvkm_dp *dp = container_of(notify, typeof(*dp), hpd);
	struct nvkm_conn *conn = dp->outp.conn;
	struct nvkm_disp *disp = dp->outp.disp;
	struct nvif_notify_conn_rep_v0 rep = {};

	OUTP_DBG(&dp->outp, "HPD: %d", line->mask);
	if (line->mask & NVKM_I2C_IRQ) {
		if (atomic_read(&dp->lt.done))
			dp->outp.func->acquire(&dp->outp);
		rep.mask |= NVIF_NOTIFY_CONN_V0_IRQ;
	} else {
		nvkm_dp_enable(dp, true);
	}

	if (line->mask & NVKM_I2C_UNPLUG)
		rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
	if (line->mask & NVKM_I2C_PLUG)
		rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;

	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
	return NVKM_NOTIFY_KEEP;
}

static void
nvkm_dp_fini(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);
	nvkm_notify_put(&dp->hpd);
	nvkm_dp_enable(dp, false);
}

static void
nvkm_dp_init(struct nvkm_outp *outp)
{
	struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
	struct nvkm_dp *dp = nvkm_dp(outp);

	nvkm_notify_put(&dp->outp.conn->hpd);

	/* eDP panels need powering on by us (if the VBIOS doesn't default it
	 * to on) before doing any AUX channel transactions.  LVDS panel power
	 * is handled by the SOR itself, and not required for LVDS DDC.
	 */
	if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
		int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
		if (power == 0)
			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);

		/* We delay here unconditionally, even if already powered,
		 * because some laptop panels have a significant resume
		 * delay before they begin responding.
		 *
		 * This is likely a bit of a hack, but we have no better idea
		 * for handling this at the moment.
		 */
		msleep(300);

		/* If the eDP panel can't be detected, we need to restore
		 * the panel power GPIO to avoid breaking another output.
		 */
		if (!nvkm_dp_enable(dp, true) && power == 0)
			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
	} else {
		nvkm_dp_enable(dp, true);
	}

	nvkm_notify_get(&dp->hpd);
}

static void *
nvkm_dp_dtor(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);
	nvkm_notify_fini(&dp->hpd);
	return dp;
}

static const struct nvkm_outp_func
nvkm_dp_func = {
	.dtor = nvkm_dp_dtor,
	.init = nvkm_dp_init,
	.fini = nvkm_dp_fini,
	.acquire = nvkm_dp_acquire,
	.release = nvkm_dp_release,
	.disable = nvkm_dp_disable,
};

static int
nvkm_dp_ctor(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
	     struct nvkm_i2c_aux *aux, struct nvkm_dp *dp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_i2c *i2c = device->i2c;
	u8  hdr, cnt, len;
	u32 data;
	int ret;

	ret = nvkm_outp_ctor(&nvkm_dp_func, disp, index, dcbE, &dp->outp);
	if (ret)
		return ret;

	dp->aux = aux;
	if (!dp->aux) {
		OUTP_ERR(&dp->outp, "no aux");
		return -EINVAL;
	}

	/* bios data is not optional */
	data = nvbios_dpout_match(bios, dp->outp.info.hasht,
				  dp->outp.info.hashm, &dp->version,
				  &hdr, &cnt, &len, &dp->info);
	if (!data) {
		OUTP_ERR(&dp->outp, "no bios dp data");
		return -EINVAL;
	}

	OUTP_DBG(&dp->outp, "bios dp %02x %02x %02x %02x",
		 dp->version, hdr, cnt, len);

	/* hotplug detect, replaces gpio-based mechanism with aux events */
	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_dp_hpd, true,
			       &(struct nvkm_i2c_ntfy_req) {
				.mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG |
					NVKM_I2C_IRQ,
				.port = dp->aux->id,
			       },
			       sizeof(struct nvkm_i2c_ntfy_req),
			       sizeof(struct nvkm_i2c_ntfy_rep),
			       &dp->hpd);
	if (ret) {
		OUTP_ERR(&dp->outp, "error monitoring aux hpd: %d", ret);
		return ret;
	}

	mutex_init(&dp->mutex);
	atomic_set(&dp->lt.done, 0);
	return 0;
}

int
nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
	    struct nvkm_outp **poutp)
{
	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
	struct nvkm_i2c_aux *aux;
	struct nvkm_dp *dp;

	if (dcbE->location == 0)
		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_CCB(dcbE->i2c_index));
	else
		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));

	if (!(dp = kzalloc(sizeof(*dp), GFP_KERNEL)))
		return -ENOMEM;
	*poutp = &dp->outp;

	return nvkm_dp_ctor(disp, index, dcbE, aux, dp);
}
719