xref: /linux/drivers/gpu/drm/i915/display/intel_dp_aux.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020-2021 Intel Corporation
4  */
5 
6 #include <drm/drm_print.h>
7 
8 #include "intel_de.h"
9 #include "intel_display_jiffies.h"
10 #include "intel_display_types.h"
11 #include "intel_display_utils.h"
12 #include "intel_dp.h"
13 #include "intel_dp_aux.h"
14 #include "intel_dp_aux_regs.h"
15 #include "intel_pps.h"
16 #include "intel_quirks.h"
17 #include "intel_tc.h"
18 #include "intel_uncore_trace.h"
19 
20 #define AUX_CH_NAME_BUFSIZE	6
21 
22 static const char *aux_ch_name(struct intel_display *display,
23 			       char *buf, int size, enum aux_ch aux_ch)
24 {
25 	if (DISPLAY_VER(display) >= 13 && aux_ch >= AUX_CH_D_XELPD)
26 		snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
27 	else if (DISPLAY_VER(display) >= 12 && aux_ch >= AUX_CH_USBC1)
28 		snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
29 	else
30 		snprintf(buf, size, "%c", 'A' + aux_ch);
31 
32 	return buf;
33 }
34 
35 u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
36 {
37 	int i;
38 	u32 v = 0;
39 
40 	if (src_bytes > 4)
41 		src_bytes = 4;
42 	for (i = 0; i < src_bytes; i++)
43 		v |= ((u32)src[i]) << ((3 - i) * 8);
44 	return v;
45 }
46 
47 static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
48 {
49 	int i;
50 
51 	if (dst_bytes > 4)
52 		dst_bytes = 4;
53 	for (i = 0; i < dst_bytes; i++)
54 		dst[i] = src >> ((3 - i) * 8);
55 }
56 
/*
 * Wait for the in-flight AUX transfer to complete, i.e. for SEND_BUSY to
 * clear in the channel control register. Waiters are woken from
 * intel_dp_aux_irq_handler() via the shared gmbus wait queue, with a 10ms
 * timeout as a backstop. Returns the last control register value read.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

	/*
	 * C is re-evaluated by wait_event_timeout() on every wakeup; as a
	 * deliberate side effect each evaluation refreshes 'status' with the
	 * latest register value.
	 */
#define C (((status = intel_de_read_notrace(display, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(display->gmbus.wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	if (!done)
		drm_err(display->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}
78 
79 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
80 {
81 	struct intel_display *display = to_intel_display(intel_dp);
82 
83 	if (index)
84 		return 0;
85 
86 	/*
87 	 * The clock divider is based off the hrawclk, and would like to run at
88 	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
89 	 */
90 	return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 2000);
91 }
92 
93 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
94 {
95 	struct intel_display *display = to_intel_display(intel_dp);
96 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
97 	u32 freq;
98 
99 	if (index)
100 		return 0;
101 
102 	/*
103 	 * The clock divider is based off the cdclk or PCH rawclk, and would
104 	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
105 	 * divide by 2000 and use that
106 	 */
107 	if (dig_port->aux_ch == AUX_CH_A)
108 		freq = display->cdclk.hw.cdclk;
109 	else
110 		freq = DISPLAY_RUNTIME_INFO(display)->rawclk_freq;
111 	return DIV_ROUND_CLOSEST(freq, 2000);
112 }
113 
114 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
115 {
116 	struct intel_display *display = to_intel_display(intel_dp);
117 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
118 
119 	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(display)) {
120 		/* Workaround for non-ULT HSW */
121 		switch (index) {
122 		case 0: return 63;
123 		case 1: return 72;
124 		default: return 0;
125 		}
126 	}
127 
128 	return ilk_get_aux_clock_divider(intel_dp, index);
129 }
130 
131 static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
132 {
133 	/*
134 	 * SKL doesn't need us to program the AUX clock divider (Hardware will
135 	 * derive the clock from CDCLK automatically). We still implement the
136 	 * get_aux_clock_divider vfunc to plug-in into the existing code.
137 	 */
138 	return index ? 0 : 1;
139 }
140 
/* Total AUX SYNC pulse count: precharge plus preamble. */
static int intel_dp_aux_sync_len(void)
{
	const int precharge = 16; /* supported range: 10-16 */
	const int preamble = 16;

	return precharge + preamble;
}
148 
149 int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp)
150 {
151 	int precharge = 10; /* 10-16 */
152 	int preamble = 8;
153 
154 	/*
155 	 * We faced some glitches on Dell Precision 5490 MTL laptop with panel:
156 	 * "Manufacturer: AUO, Model: 63898" when using HW default 18. Using 20
157 	 * is fixing these problems with the panel. It is still within range
158 	 * mentioned in eDP specification. Increasing Fast Wake sync length is
159 	 * causing problems with other panels: increase length as a quirk for
160 	 * this specific laptop.
161 	 */
162 	if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN))
163 		precharge += 2;
164 
165 	return precharge + preamble;
166 }
167 
/*
 * Extra precharge time beyond the minimum, expressed in the 2us units
 * the hardware expects.
 */
static int g4x_dp_aux_precharge_len(void)
{
	const int precharge_min = 10;
	const int preamble = 16;

	/* HW wants the length of the extra precharge in 2us units */
	return (intel_dp_aux_sync_len() - precharge_min - preamble) / 2;
}
177 
178 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
179 				int send_bytes,
180 				u32 aux_clock_divider)
181 {
182 	struct intel_display *display = to_intel_display(intel_dp);
183 	u32 timeout;
184 
185 	/* Max timeout value on G4x-BDW: 1.6ms */
186 	if (display->platform.broadwell)
187 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
188 	else
189 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
190 
191 	return DP_AUX_CH_CTL_SEND_BUSY |
192 		DP_AUX_CH_CTL_DONE |
193 		DP_AUX_CH_CTL_INTERRUPT |
194 		DP_AUX_CH_CTL_TIME_OUT_ERROR |
195 		timeout |
196 		DP_AUX_CH_CTL_RECEIVE_ERROR |
197 		DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
198 		DP_AUX_CH_CTL_PRECHARGE_2US(g4x_dp_aux_precharge_len()) |
199 		DP_AUX_CH_CTL_BIT_CLOCK_2X(aux_clock_divider);
200 }
201 
202 static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
203 				int send_bytes,
204 				u32 unused)
205 {
206 	struct intel_display *display = to_intel_display(intel_dp);
207 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
208 	u32 ret;
209 
210 	/*
211 	 * Max timeout values:
212 	 * SKL-GLK: 1.6ms
213 	 * ICL+: 4ms
214 	 */
215 	ret = DP_AUX_CH_CTL_SEND_BUSY |
216 		DP_AUX_CH_CTL_DONE |
217 		DP_AUX_CH_CTL_INTERRUPT |
218 		DP_AUX_CH_CTL_TIME_OUT_ERROR |
219 		DP_AUX_CH_CTL_TIME_OUT_MAX |
220 		DP_AUX_CH_CTL_RECEIVE_ERROR |
221 		DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
222 		DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len(intel_dp)) |
223 		DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
224 
225 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
226 		ret |= DP_AUX_CH_CTL_TBT_IO;
227 
228 	/*
229 	 * Power request bit is already set during aux power well enable.
230 	 * Preserve the bit across aux transactions.
231 	 */
232 	if (DISPLAY_VER(display) >= 14)
233 		ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;
234 
235 	return ret;
236 }
237 
/*
 * Perform one raw AUX channel transfer: write send_bytes from @send,
 * receive up to @recv_size bytes into @recv.
 *
 * Returns the number of bytes received on success, or a negative error:
 * -ENXIO (port disconnected), -EBUSY (channel stuck busy or forbidden
 * reply size), -E2BIG (message exceeds the 20-byte HW limit), -EIO
 * (receive error), -ETIMEDOUT (sink did not reply).
 *
 * Takes care of power domains, PPS/VDD state, PM QoS and the digital
 * port lock around the actual register-level transfer.
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	struct ref_tracker *aux_wakeref;
	struct ref_tracker *pps_wakeref = NULL;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	/* Cache the control register and the 5 data registers for this channel. */
	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	intel_digital_port_lock(encoder);
	/*
	 * Abort transfers on a disconnected port as required by
	 * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
	 * timeouts that would otherwise happen.
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !intel_digital_port_connected_locked(&dig_port->base)) {
		ret = -ENXIO;
		goto out_unlock;
	}

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(display, aux_domain);

	/*
	 * The PPS state needs to be locked for:
	 * - eDP on all platforms, since AUX transfers on eDP need VDD power
	 *   (either forced or via panel power) which depends on the PPS
	 *   state.
	 * - non-eDP on platforms where the PPS is a pipe instance (VLV/CHV),
	 *   since changing the PPS state (via a parallel modeset for
	 *   instance) may interfere with the AUX transfers on a non-eDP
	 *   output as well.
	 */
	if (intel_dp_is_edp(intel_dp) ||
	    display->platform.valleyview || display->platform.cherryview)
		pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/*
	 * FIXME PSR should be disabled here to prevent
	 * it using the same AUX CH simultaneously
	 */

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_de_read_notrace(display, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	/* Channel still busy after 3 tries: warn once per distinct status. */
	if (try == 3) {
		const u32 status = intel_de_read(display, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(display->drm,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(display->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the transfer once per available clock divider. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_de_write(display, ch_data[i >> 2],
					       intel_dp_aux_pack(send + i,
								 send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_de_write(display, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_de_write(display, ch_ctl,
				       status | DP_AUX_CH_CTL_DONE |
				       DP_AUX_CH_CTL_TIME_OUT_ERROR |
				       DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(display->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error. Timeouts occur when the sink is
	 * not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(display->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're "normal"
	 * -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(display->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, status);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(display->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_aux_unpack(intel_de_read(display, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Undo everything acquired above, in reverse order. */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	if (pps_wakeref)
		intel_pps_unlock(intel_dp, pps_wakeref);

	intel_display_power_put_async(display, aux_domain, aux_wakeref);
out_unlock:
	intel_digital_port_unlock(encoder);

	return ret;
}
456 
457 #define BARE_ADDRESS_SIZE	3
458 #define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
459 
460 static void
461 intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
462 		    const struct drm_dp_aux_msg *msg)
463 {
464 	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
465 	txbuf[1] = (msg->address >> 8) & 0xff;
466 	txbuf[2] = msg->address & 0xff;
467 	txbuf[3] = msg->size - 1;
468 }
469 
470 static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
471 {
472 	/*
473 	 * If we're trying to send the HDCP Aksv, we need to set a the Aksv
474 	 * select bit to inform the hardware to send the Aksv after our header
475 	 * since we can't access that data from software.
476 	 */
477 	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
478 	    msg->address == DP_AUX_HDCP_AKSV)
479 		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
480 
481 	return 0;
482 }
483 
/*
 * drm_dp_aux .transfer hook: encode a drm_dp_aux_msg into the HW AUX
 * message format, run the transfer via intel_dp_aux_xfer() and decode the
 * reply. Returns the payload size on success or a negative error code.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct intel_display *display = to_intel_display(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size writes send just the bare 3-byte address. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(display->drm, txsize > 20))
			return -E2BIG;

		/* Either both buffer and size are set, or neither is. */
		drm_WARN_ON(display->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			/* Reply code lives in the high nibble of the first byte. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* One extra byte for the reply code preceding the data. */
		rxsize = msg->size + 1;

		if (drm_WARN_ON(display->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
556 
557 static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
558 {
559 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
560 	enum aux_ch aux_ch = dig_port->aux_ch;
561 
562 	switch (aux_ch) {
563 	case AUX_CH_B:
564 	case AUX_CH_C:
565 	case AUX_CH_D:
566 		return VLV_DP_AUX_CH_CTL(aux_ch);
567 	default:
568 		MISSING_CASE(aux_ch);
569 		return VLV_DP_AUX_CH_CTL(AUX_CH_B);
570 	}
571 }
572 
573 static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
574 {
575 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
576 	enum aux_ch aux_ch = dig_port->aux_ch;
577 
578 	switch (aux_ch) {
579 	case AUX_CH_B:
580 	case AUX_CH_C:
581 	case AUX_CH_D:
582 		return VLV_DP_AUX_CH_DATA(aux_ch, index);
583 	default:
584 		MISSING_CASE(aux_ch);
585 		return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
586 	}
587 }
588 
589 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
590 {
591 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
592 	enum aux_ch aux_ch = dig_port->aux_ch;
593 
594 	switch (aux_ch) {
595 	case AUX_CH_B:
596 	case AUX_CH_C:
597 	case AUX_CH_D:
598 		return DP_AUX_CH_CTL(aux_ch);
599 	default:
600 		MISSING_CASE(aux_ch);
601 		return DP_AUX_CH_CTL(AUX_CH_B);
602 	}
603 }
604 
605 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
606 {
607 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
608 	enum aux_ch aux_ch = dig_port->aux_ch;
609 
610 	switch (aux_ch) {
611 	case AUX_CH_B:
612 	case AUX_CH_C:
613 	case AUX_CH_D:
614 		return DP_AUX_CH_DATA(aux_ch, index);
615 	default:
616 		MISSING_CASE(aux_ch);
617 		return DP_AUX_CH_DATA(AUX_CH_B, index);
618 	}
619 }
620 
621 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
622 {
623 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
624 	enum aux_ch aux_ch = dig_port->aux_ch;
625 
626 	switch (aux_ch) {
627 	case AUX_CH_A:
628 		return DP_AUX_CH_CTL(aux_ch);
629 	case AUX_CH_B:
630 	case AUX_CH_C:
631 	case AUX_CH_D:
632 		return PCH_DP_AUX_CH_CTL(aux_ch);
633 	default:
634 		MISSING_CASE(aux_ch);
635 		return DP_AUX_CH_CTL(AUX_CH_A);
636 	}
637 }
638 
639 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
640 {
641 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
642 	enum aux_ch aux_ch = dig_port->aux_ch;
643 
644 	switch (aux_ch) {
645 	case AUX_CH_A:
646 		return DP_AUX_CH_DATA(aux_ch, index);
647 	case AUX_CH_B:
648 	case AUX_CH_C:
649 	case AUX_CH_D:
650 		return PCH_DP_AUX_CH_DATA(aux_ch, index);
651 	default:
652 		MISSING_CASE(aux_ch);
653 		return DP_AUX_CH_DATA(AUX_CH_A, index);
654 	}
655 }
656 
657 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
658 {
659 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
660 	enum aux_ch aux_ch = dig_port->aux_ch;
661 
662 	switch (aux_ch) {
663 	case AUX_CH_A:
664 	case AUX_CH_B:
665 	case AUX_CH_C:
666 	case AUX_CH_D:
667 	case AUX_CH_E:
668 	case AUX_CH_F:
669 		return DP_AUX_CH_CTL(aux_ch);
670 	default:
671 		MISSING_CASE(aux_ch);
672 		return DP_AUX_CH_CTL(AUX_CH_A);
673 	}
674 }
675 
676 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
677 {
678 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
679 	enum aux_ch aux_ch = dig_port->aux_ch;
680 
681 	switch (aux_ch) {
682 	case AUX_CH_A:
683 	case AUX_CH_B:
684 	case AUX_CH_C:
685 	case AUX_CH_D:
686 	case AUX_CH_E:
687 	case AUX_CH_F:
688 		return DP_AUX_CH_DATA(aux_ch, index);
689 	default:
690 		MISSING_CASE(aux_ch);
691 		return DP_AUX_CH_DATA(AUX_CH_A, index);
692 	}
693 }
694 
695 static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
696 {
697 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
698 	enum aux_ch aux_ch = dig_port->aux_ch;
699 
700 	switch (aux_ch) {
701 	case AUX_CH_A:
702 	case AUX_CH_B:
703 	case AUX_CH_C:
704 	case AUX_CH_USBC1:
705 	case AUX_CH_USBC2:
706 	case AUX_CH_USBC3:
707 	case AUX_CH_USBC4:
708 	case AUX_CH_USBC5:  /* aka AUX_CH_D_XELPD */
709 	case AUX_CH_USBC6:  /* aka AUX_CH_E_XELPD */
710 		return DP_AUX_CH_CTL(aux_ch);
711 	default:
712 		MISSING_CASE(aux_ch);
713 		return DP_AUX_CH_CTL(AUX_CH_A);
714 	}
715 }
716 
717 static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
718 {
719 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
720 	enum aux_ch aux_ch = dig_port->aux_ch;
721 
722 	switch (aux_ch) {
723 	case AUX_CH_A:
724 	case AUX_CH_B:
725 	case AUX_CH_C:
726 	case AUX_CH_USBC1:
727 	case AUX_CH_USBC2:
728 	case AUX_CH_USBC3:
729 	case AUX_CH_USBC4:
730 	case AUX_CH_USBC5:  /* aka AUX_CH_D_XELPD */
731 	case AUX_CH_USBC6:  /* aka AUX_CH_E_XELPD */
732 		return DP_AUX_CH_DATA(aux_ch, index);
733 	default:
734 		MISSING_CASE(aux_ch);
735 		return DP_AUX_CH_DATA(AUX_CH_A, index);
736 	}
737 }
738 
739 static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
740 {
741 	struct intel_display *display = to_intel_display(intel_dp);
742 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
743 	enum aux_ch aux_ch = dig_port->aux_ch;
744 
745 	switch (aux_ch) {
746 	case AUX_CH_A:
747 	case AUX_CH_B:
748 	case AUX_CH_USBC1:
749 	case AUX_CH_USBC2:
750 	case AUX_CH_USBC3:
751 	case AUX_CH_USBC4:
752 		return XELPDP_DP_AUX_CH_CTL(display, aux_ch);
753 	default:
754 		MISSING_CASE(aux_ch);
755 		return XELPDP_DP_AUX_CH_CTL(display, AUX_CH_A);
756 	}
757 }
758 
759 static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
760 {
761 	struct intel_display *display = to_intel_display(intel_dp);
762 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
763 	enum aux_ch aux_ch = dig_port->aux_ch;
764 
765 	switch (aux_ch) {
766 	case AUX_CH_A:
767 	case AUX_CH_B:
768 	case AUX_CH_USBC1:
769 	case AUX_CH_USBC2:
770 	case AUX_CH_USBC3:
771 	case AUX_CH_USBC4:
772 		return XELPDP_DP_AUX_CH_DATA(display, aux_ch, index);
773 	default:
774 		MISSING_CASE(aux_ch);
775 		return XELPDP_DP_AUX_CH_DATA(display, AUX_CH_A, index);
776 	}
777 }
778 
779 void intel_dp_aux_fini(struct intel_dp *intel_dp)
780 {
781 	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
782 		cpu_latency_qos_remove_request(&intel_dp->pm_qos);
783 
784 	kfree(intel_dp->aux.name);
785 }
786 
/*
 * One-time AUX channel setup for an intel_dp: pick the register/clock/
 * send-ctl vfuncs for the platform, init the drm_dp_aux, name the channel
 * and register the PM QoS request used during transfers.
 */
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	/* Register-offset vfuncs; ladder is ordered newest platform first. */
	if (DISPLAY_VER(display) >= 14) {
		intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(display)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else if (display->platform.valleyview || display->platform.cherryview) {
		intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	/* AUX clock divider vfunc. */
	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (display->platform.broadwell || display->platform.haswell)
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(display))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	/* AUX send-ctl vfunc. */
	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	intel_dp->aux.drm_dev = display->drm;
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
				       aux_ch_name(display, buf, sizeof(buf), aux_ch),
				       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	intel_dp_dpcd_set_probe(intel_dp, true);
}
842 
843 static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
844 {
845 	struct intel_display *display = to_intel_display(encoder);
846 
847 	/* SKL has DDI E but no AUX E */
848 	if (DISPLAY_VER(display) == 9 && encoder->port == PORT_E)
849 		return AUX_CH_A;
850 
851 	return (enum aux_ch)encoder->port;
852 }
853 
854 static struct intel_encoder *
855 get_encoder_by_aux_ch(struct intel_encoder *encoder,
856 		      enum aux_ch aux_ch)
857 {
858 	struct intel_display *display = to_intel_display(encoder);
859 	struct intel_encoder *other;
860 
861 	for_each_intel_encoder(display->drm, other) {
862 		if (other == encoder)
863 			continue;
864 
865 		if (!intel_encoder_is_dig_port(other))
866 			continue;
867 
868 		if (enc_to_dig_port(other)->aux_ch == aux_ch)
869 			return other;
870 	}
871 
872 	return NULL;
873 }
874 
875 enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
876 {
877 	struct intel_display *display = to_intel_display(encoder);
878 	struct intel_encoder *other;
879 	const char *source;
880 	enum aux_ch aux_ch;
881 	char buf[AUX_CH_NAME_BUFSIZE];
882 
883 	aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
884 	source = "VBT";
885 
886 	if (aux_ch == AUX_CH_NONE) {
887 		aux_ch = default_aux_ch(encoder);
888 		source = "platform default";
889 	}
890 
891 	if (aux_ch == AUX_CH_NONE)
892 		return AUX_CH_NONE;
893 
894 	/* FIXME validate aux_ch against platform caps */
895 
896 	other = get_encoder_by_aux_ch(encoder, aux_ch);
897 	if (other) {
898 		drm_dbg_kms(display->drm,
899 			    "[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
900 			    encoder->base.base.id, encoder->base.name,
901 			    aux_ch_name(display, buf, sizeof(buf), aux_ch),
902 			    other->base.base.id, other->base.name);
903 		return AUX_CH_NONE;
904 	}
905 
906 	drm_dbg_kms(display->drm,
907 		    "[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
908 		    encoder->base.base.id, encoder->base.name,
909 		    aux_ch_name(display, buf, sizeof(buf), aux_ch), source);
910 
911 	return aux_ch;
912 }
913 
/*
 * AUX done interrupts share the gmbus wait queue (see
 * intel_dp_aux_wait_done()); wake any transfer waiting on it.
 */
void intel_dp_aux_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->gmbus.wait_queue);
}
918