xref: /linux/drivers/gpu/drm/i915/display/intel_dp_aux.c (revision fd7d598270724cc787982ea48bbe17ad383a8b7f)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020-2021 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "i915_trace.h"
9 #include "intel_bios.h"
10 #include "intel_de.h"
11 #include "intel_display_types.h"
12 #include "intel_dp_aux.h"
13 #include "intel_dp_aux_regs.h"
14 #include "intel_pps.h"
15 #include "intel_tc.h"
16 
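/* Long enough for the longest AUX CH name, "USBC1".."USBC6", plus the terminating NUL. */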
17 #define AUX_CH_NAME_BUFSIZE	6
18 
19 static const char *aux_ch_name(struct drm_i915_private *i915,
20 			       char *buf, int size, enum aux_ch aux_ch)
21 {
22 	if (DISPLAY_VER(i915) >= 13 && aux_ch >= AUX_CH_D_XELPD)
23 		snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
24 	else if (DISPLAY_VER(i915) >= 12 && aux_ch >= AUX_CH_USBC1)
25 		snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
26 	else
27 		snprintf(buf, size, "%c", 'A' + aux_ch);
28 
29 	return buf;
30 }
31 
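/*
 * Pack up to 4 bytes from @src into a single 32-bit value, MSB first,
 * matching the layout of the AUX channel data registers.
 */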
32 u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
33 {
34 	int i;
35 	u32 v = 0;
36 
37 	if (src_bytes > 4)
38 		src_bytes = 4;
39 	for (i = 0; i < src_bytes; i++)
40 		v |= ((u32)src[i]) << ((3 - i) * 8);
41 	return v;
42 }
43 
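/*
 * Unpack a 32-bit AUX channel data register value into up to 4 bytes at
 * @dst, MSB first.
 */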
44 static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
45 {
46 	int i;
47 
48 	if (dst_bytes > 4)
49 		dst_bytes = 4;
50 	for (i = 0; i < dst_bytes; i++)
51 		dst[i] = src >> ((3 - i) * 8);
52 }
53 
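/*
 * Wait for the current AUX transaction to complete by polling the
 * SEND_BUSY bit in AUX_CH_CTL (10 ms timeout) and return the final
 * status register value.
 */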
54 static u32
55 intel_dp_aux_wait_done(struct intel_dp *intel_dp)
56 {
57 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
58 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
59 	const unsigned int timeout_ms = 10;
60 	u32 status;
61 	int ret;
62 
63 	ret = __intel_de_wait_for_register(i915, ch_ctl,
64 					   DP_AUX_CH_CTL_SEND_BUSY, 0,
65 					   2, timeout_ms, &status);
66 
67 	if (ret == -ETIMEDOUT)
68 		drm_err(&i915->drm,
69 			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
70 			intel_dp->aux.name, timeout_ms, status);
71 
72 	return status;
73 }
74 
75 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
76 {
77 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
78 
79 	if (index)
80 		return 0;
81 
82 	/*
83 	 * The clock divider is derived from the hrawclk and should run at
84 	 * 2 MHz, so take the hrawclk value and divide by 2000.
85 	 */
86 	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
87 }
88 
89 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
90 {
91 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
92 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
93 	u32 freq;
94 
95 	if (index)
96 		return 0;
97 
98 	/*
99 	 * The clock divider is derived from the cdclk or PCH rawclk and
100 	 * should run at 2 MHz, so take the cdclk or PCH rawclk value and
101 	 * divide by 2000.
102 	 */
103 	if (dig_port->aux_ch == AUX_CH_A)
104 		freq = dev_priv->display.cdclk.hw.cdclk;
105 	else
106 		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
107 	return DIV_ROUND_CLOSEST(freq, 2000);
108 }
109 
110 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
111 {
112 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
113 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
114 
115 	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
116 		/* Workaround for non-ULT HSW */
117 		switch (index) {
118 		case 0: return 63;
119 		case 1: return 72;
120 		default: return 0;
121 		}
122 	}
123 
124 	return ilk_get_aux_clock_divider(intel_dp, index);
125 }
126 
127 static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
128 {
129 	/*
130 	 * SKL doesn't need us to program the AUX clock divider (the hardware
131 	 * derives the clock from CDCLK automatically). We still implement the
132 	 * get_aux_clock_divider vfunc to plug into the existing code.
133 	 */
134 	return index ? 0 : 1;
135 }
136 
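/*
 * Total number of SYNC pulses (precharge + preamble) we drive at the
 * start of an AUX transaction.
 */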
137 static int intel_dp_aux_sync_len(void)
138 {
139 	int precharge = 16; /* 10-16 */
140 	int preamble = 16;
141 
142 	return precharge + preamble;
143 }
144 
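/*
 * SYNC pulse count for the fast wake (FW) variant of the AUX
 * transaction, programmed into the SKL+ FW_SYNC_PULSE field.
 */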
145 static int intel_dp_aux_fw_sync_len(void)
146 {
147 	int precharge = 10; /* 10-16 */
148 	int preamble = 8;
149 
150 	return precharge + preamble;
151 }
152 
153 static int g4x_dp_aux_precharge_len(void)
154 {
155 	int precharge_min = 10;
156 	int preamble = 16;
157 
158 	/* HW wants the length of the extra precharge in 2us units */
159 	return (intel_dp_aux_sync_len() -
160 		precharge_min - preamble) / 2;
161 }
162 
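/*
 * Build the AUX_CH_CTL value for G4x through BDW, where we program the
 * 2x bit clock divider and the extra precharge time explicitly.
 */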
163 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
164 				int send_bytes,
165 				u32 aux_clock_divider)
166 {
167 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
168 	struct drm_i915_private *dev_priv =
169 			to_i915(dig_port->base.base.dev);
170 	u32 timeout;
171 
172 	/* Max timeout value on G4x-BDW: 1.6ms */
173 	if (IS_BROADWELL(dev_priv))
174 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
175 	else
176 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
177 
178 	return DP_AUX_CH_CTL_SEND_BUSY |
179 		DP_AUX_CH_CTL_DONE |
180 		DP_AUX_CH_CTL_INTERRUPT |
181 		DP_AUX_CH_CTL_TIME_OUT_ERROR |
182 		timeout |
183 		DP_AUX_CH_CTL_RECEIVE_ERROR |
184 		DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
185 		DP_AUX_CH_CTL_PRECHARGE_2US(g4x_dp_aux_precharge_len()) |
186 		DP_AUX_CH_CTL_BIT_CLOCK_2X(aux_clock_divider);
187 }
188 
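/*
 * Build the AUX_CH_CTL value for SKL+, where the hardware derives the
 * AUX clock itself and we program the SYNC pulse counts instead.
 */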
189 static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
190 				int send_bytes,
191 				u32 unused)
192 {
193 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
194 	struct drm_i915_private *i915 =	to_i915(dig_port->base.base.dev);
195 	u32 ret;
196 
197 	/*
198 	 * Max timeout values:
199 	 * SKL-GLK: 1.6ms
200 	 * ICL+: 4ms
201 	 */
202 	ret = DP_AUX_CH_CTL_SEND_BUSY |
203 		DP_AUX_CH_CTL_DONE |
204 		DP_AUX_CH_CTL_INTERRUPT |
205 		DP_AUX_CH_CTL_TIME_OUT_ERROR |
206 		DP_AUX_CH_CTL_TIME_OUT_MAX |
207 		DP_AUX_CH_CTL_RECEIVE_ERROR |
208 		DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
209 		DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
210 		DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
211 
212 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
213 		ret |= DP_AUX_CH_CTL_TBT_IO;
214 
215 	/*
216 	 * Power request bit is already set during aux power well enable.
217 	 * Preserve the bit across aux transactions.
218 	 */
219 	if (DISPLAY_VER(i915) >= 14)
220 		ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;
221 
222 	return ret;
223 }
224 
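/*
 * Perform one raw AUX transfer: take the TC port lock if needed, grab
 * AUX power domain and panel power sequencer references, wait for any
 * previous transaction to finish, load up to 20 bytes into the 5 data
 * registers, retry the transaction as required, and unpack the reply
 * into @recv. Returns the number of bytes received or a negative error
 * code.
 */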
225 static int
226 intel_dp_aux_xfer(struct intel_dp *intel_dp,
227 		  const u8 *send, int send_bytes,
228 		  u8 *recv, int recv_size,
229 		  u32 aux_send_ctl_flags)
230 {
231 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
232 	struct drm_i915_private *i915 =
233 			to_i915(dig_port->base.base.dev);
234 	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
235 	bool is_tc_port = intel_phy_is_tc(i915, phy);
236 	i915_reg_t ch_ctl, ch_data[5];
237 	u32 aux_clock_divider;
238 	enum intel_display_power_domain aux_domain;
239 	intel_wakeref_t aux_wakeref;
240 	intel_wakeref_t pps_wakeref;
241 	int i, ret, recv_bytes;
242 	int try, clock = 0;
243 	u32 status;
244 	bool vdd;
245 
246 	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
247 	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
248 		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
249 
250 	if (is_tc_port) {
251 		intel_tc_port_lock(dig_port);
252 		/*
253 		 * Abort transfers on a disconnected port as required by
254 		 * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
255 		 * timeouts that would otherwise happen.
256 		 * TODO: abort the transfer on non-TC ports as well.
257 		 */
258 		if (!intel_tc_port_connected_locked(&dig_port->base)) {
259 			ret = -ENXIO;
260 			goto out_unlock;
261 		}
262 	}
263 
264 	aux_domain = intel_aux_power_domain(dig_port);
265 
266 	aux_wakeref = intel_display_power_get(i915, aux_domain);
267 	pps_wakeref = intel_pps_lock(intel_dp);
268 
269 	/*
270 	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
271 	 * In such cases we want to leave VDD enabled and it's up to upper layers
272 	 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
273 	 * ourselves.
274 	 */
275 	vdd = intel_pps_vdd_on_unlocked(intel_dp);
276 
277 	/*
278 	 * DP AUX is extremely sensitive to irq latency, hence request the
279 	 * lowest possible wakeup latency to prevent the cpu from going into
280 	 * deep sleep states.
281 	 */
282 	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
283 
284 	intel_pps_check_power_unlocked(intel_dp);
285 
286 	/*
287 	 * FIXME PSR should be disabled here to prevent
288 	 * it from using the same AUX CH simultaneously
289 	 */
290 
291 	/* Try to wait for any previous AUX channel activity */
292 	for (try = 0; try < 3; try++) {
293 		status = intel_de_read_notrace(i915, ch_ctl);
294 		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
295 			break;
296 		msleep(1);
297 	}
298 	/* just trace the final value */
299 	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
300 
301 	if (try == 3) {
302 		const u32 status = intel_de_read(i915, ch_ctl);
303 
304 		if (status != intel_dp->aux_busy_last_status) {
305 			drm_WARN(&i915->drm, 1,
306 				 "%s: not started (status 0x%08x)\n",
307 				 intel_dp->aux.name, status);
308 			intel_dp->aux_busy_last_status = status;
309 		}
310 
311 		ret = -EBUSY;
312 		goto out;
313 	}
314 
315 	/* Only 5 data registers! */
316 	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
317 		ret = -E2BIG;
318 		goto out;
319 	}
320 
321 	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
322 		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
323 							  send_bytes,
324 							  aux_clock_divider);
325 
326 		send_ctl |= aux_send_ctl_flags;
327 
328 		/* Must try at least 3 times according to DP spec */
329 		for (try = 0; try < 5; try++) {
330 			/* Load the send data into the aux channel data registers */
331 			for (i = 0; i < send_bytes; i += 4)
332 				intel_de_write(i915, ch_data[i >> 2],
333 					       intel_dp_aux_pack(send + i,
334 								 send_bytes - i));
335 
336 			/* Send the command and wait for it to complete */
337 			intel_de_write(i915, ch_ctl, send_ctl);
338 
339 			status = intel_dp_aux_wait_done(intel_dp);
340 
341 			/* Clear done status and any errors */
342 			intel_de_write(i915, ch_ctl,
343 				       status | DP_AUX_CH_CTL_DONE |
344 				       DP_AUX_CH_CTL_TIME_OUT_ERROR |
345 				       DP_AUX_CH_CTL_RECEIVE_ERROR);
346 
347 			/*
348 			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
349 			 *   400us delay required for errors and timeouts
350 			 *   Timeout errors from the HW already meet this
351 			 *   requirement so skip to next iteration
352 			 */
353 			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
354 				continue;
355 
356 			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
357 				usleep_range(400, 500);
358 				continue;
359 			}
360 			if (status & DP_AUX_CH_CTL_DONE)
361 				goto done;
362 		}
363 	}
364 
365 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
366 		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
367 			intel_dp->aux.name, status);
368 		ret = -EBUSY;
369 		goto out;
370 	}
371 
372 done:
373 	/*
374 	 * Check for timeout or receive error. Timeouts occur when the sink is
375 	 * not connected.
376 	 */
377 	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
378 		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
379 			intel_dp->aux.name, status);
380 		ret = -EIO;
381 		goto out;
382 	}
383 
384 	/*
385 	 * Timeouts occur when the device isn't connected, so they're "normal"
386 	 * -- don't fill the kernel log with these
387 	 */
388 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
389 		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
390 			    intel_dp->aux.name, status);
391 		ret = -ETIMEDOUT;
392 		goto out;
393 	}
394 
395 	/* Unload any bytes sent back from the other side */
396 	recv_bytes = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, status);
397 
398 	/*
399 	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
400 	 * We have no idea what happened, so return -EBUSY and let the
401 	 * drm layer take care of the necessary retries.
402 	 */
403 	if (recv_bytes == 0 || recv_bytes > 20) {
404 		drm_dbg_kms(&i915->drm,
405 			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
406 			    intel_dp->aux.name, recv_bytes);
407 		ret = -EBUSY;
408 		goto out;
409 	}
410 
411 	if (recv_bytes > recv_size)
412 		recv_bytes = recv_size;
413 
414 	for (i = 0; i < recv_bytes; i += 4)
415 		intel_dp_aux_unpack(intel_de_read(i915, ch_data[i >> 2]),
416 				    recv + i, recv_bytes - i);
417 
418 	ret = recv_bytes;
419 out:
420 	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
421 
422 	if (vdd)
423 		intel_pps_vdd_off_unlocked(intel_dp, false);
424 
425 	intel_pps_unlock(intel_dp, pps_wakeref);
426 	intel_display_power_put_async(i915, aux_domain, aux_wakeref);
427 out_unlock:
428 	if (is_tc_port)
429 		intel_tc_port_unlock(dig_port);
430 
431 	return ret;
432 }
433 
434 #define BARE_ADDRESS_SIZE	3
435 #define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
436 
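/*
 * Build the 4 byte AUX message header: request type plus the top
 * address nibble, the remaining 16 address bits, and the length field
 * (size - 1).
 */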
437 static void
438 intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
439 		    const struct drm_dp_aux_msg *msg)
440 {
441 	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
442 	txbuf[1] = (msg->address >> 8) & 0xff;
443 	txbuf[2] = msg->address & 0xff;
444 	txbuf[3] = msg->size - 1;
445 }
446 
447 static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
448 {
449 	/*
450 	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
451 	 * select bit to inform the hardware to send the Aksv after our header
452 	 * since we can't access that data from software.
453 	 */
454 	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
455 	    msg->address == DP_AUX_HDCP_AKSV)
456 		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
457 
458 	return 0;
459 }
460 
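/*
 * drm_dp_aux .transfer hook: translate a drm_dp_aux_msg into a raw AUX
 * transfer and decode the reply header and short write/read sizes.
 */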
461 static ssize_t
462 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
463 {
464 	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
465 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
466 	u8 txbuf[20], rxbuf[20];
467 	size_t txsize, rxsize;
468 	u32 flags = intel_dp_aux_xfer_flags(msg);
469 	int ret;
470 
471 	intel_dp_aux_header(txbuf, msg);
472 
473 	switch (msg->request & ~DP_AUX_I2C_MOT) {
474 	case DP_AUX_NATIVE_WRITE:
475 	case DP_AUX_I2C_WRITE:
476 	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
477 		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
478 		rxsize = 2; /* 0 or 1 data bytes */
479 
480 		if (drm_WARN_ON(&i915->drm, txsize > 20))
481 			return -E2BIG;
482 
483 		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
484 
485 		if (msg->buffer)
486 			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
487 
488 		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
489 					rxbuf, rxsize, flags);
490 		if (ret > 0) {
491 			msg->reply = rxbuf[0] >> 4;
492 
493 			if (ret > 1) {
494 				/* Number of bytes written in a short write. */
495 				ret = clamp_t(int, rxbuf[1], 0, msg->size);
496 			} else {
497 				/* Return payload size. */
498 				ret = msg->size;
499 			}
500 		}
501 		break;
502 
503 	case DP_AUX_NATIVE_READ:
504 	case DP_AUX_I2C_READ:
505 		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
506 		rxsize = msg->size + 1;
507 
508 		if (drm_WARN_ON(&i915->drm, rxsize > 20))
509 			return -E2BIG;
510 
511 		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
512 					rxbuf, rxsize, flags);
513 		if (ret > 0) {
514 			msg->reply = rxbuf[0] >> 4;
515 			/*
516 			 * Assume happy day, and copy the data. The caller is
517 			 * expected to check msg->reply before touching it.
518 			 *
519 			 * Return payload size.
520 			 */
521 			ret--;
522 			memcpy(msg->buffer, rxbuf + 1, ret);
523 		}
524 		break;
525 
526 	default:
527 		ret = -EINVAL;
528 		break;
529 	}
530 
531 	return ret;
532 }
533 
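/*
 * Per-platform AUX register lookup: map the encoder's aux_ch to the
 * right control/data registers, warning (MISSING_CASE) and falling back
 * to a safe default for unexpected channels.
 */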
534 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
535 {
536 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
537 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
538 	enum aux_ch aux_ch = dig_port->aux_ch;
539 
540 	switch (aux_ch) {
541 	case AUX_CH_B:
542 	case AUX_CH_C:
543 	case AUX_CH_D:
544 		return DP_AUX_CH_CTL(aux_ch);
545 	default:
546 		MISSING_CASE(aux_ch);
547 		return DP_AUX_CH_CTL(AUX_CH_B);
548 	}
549 }
550 
551 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
552 {
553 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
554 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
555 	enum aux_ch aux_ch = dig_port->aux_ch;
556 
557 	switch (aux_ch) {
558 	case AUX_CH_B:
559 	case AUX_CH_C:
560 	case AUX_CH_D:
561 		return DP_AUX_CH_DATA(aux_ch, index);
562 	default:
563 		MISSING_CASE(aux_ch);
564 		return DP_AUX_CH_DATA(AUX_CH_B, index);
565 	}
566 }
567 
568 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
569 {
570 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
571 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
572 	enum aux_ch aux_ch = dig_port->aux_ch;
573 
574 	switch (aux_ch) {
575 	case AUX_CH_A:
576 		return DP_AUX_CH_CTL(aux_ch);
577 	case AUX_CH_B:
578 	case AUX_CH_C:
579 	case AUX_CH_D:
580 		return PCH_DP_AUX_CH_CTL(aux_ch);
581 	default:
582 		MISSING_CASE(aux_ch);
583 		return DP_AUX_CH_CTL(AUX_CH_A);
584 	}
585 }
586 
587 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
588 {
589 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
590 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
591 	enum aux_ch aux_ch = dig_port->aux_ch;
592 
593 	switch (aux_ch) {
594 	case AUX_CH_A:
595 		return DP_AUX_CH_DATA(aux_ch, index);
596 	case AUX_CH_B:
597 	case AUX_CH_C:
598 	case AUX_CH_D:
599 		return PCH_DP_AUX_CH_DATA(aux_ch, index);
600 	default:
601 		MISSING_CASE(aux_ch);
602 		return DP_AUX_CH_DATA(AUX_CH_A, index);
603 	}
604 }
605 
606 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
607 {
608 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
609 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
610 	enum aux_ch aux_ch = dig_port->aux_ch;
611 
612 	switch (aux_ch) {
613 	case AUX_CH_A:
614 	case AUX_CH_B:
615 	case AUX_CH_C:
616 	case AUX_CH_D:
617 	case AUX_CH_E:
618 	case AUX_CH_F:
619 		return DP_AUX_CH_CTL(aux_ch);
620 	default:
621 		MISSING_CASE(aux_ch);
622 		return DP_AUX_CH_CTL(AUX_CH_A);
623 	}
624 }
625 
626 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
627 {
628 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
629 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
630 	enum aux_ch aux_ch = dig_port->aux_ch;
631 
632 	switch (aux_ch) {
633 	case AUX_CH_A:
634 	case AUX_CH_B:
635 	case AUX_CH_C:
636 	case AUX_CH_D:
637 	case AUX_CH_E:
638 	case AUX_CH_F:
639 		return DP_AUX_CH_DATA(aux_ch, index);
640 	default:
641 		MISSING_CASE(aux_ch);
642 		return DP_AUX_CH_DATA(AUX_CH_A, index);
643 	}
644 }
645 
646 static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
647 {
648 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
649 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
650 	enum aux_ch aux_ch = dig_port->aux_ch;
651 
652 	switch (aux_ch) {
653 	case AUX_CH_A:
654 	case AUX_CH_B:
655 	case AUX_CH_C:
656 	case AUX_CH_USBC1:
657 	case AUX_CH_USBC2:
658 	case AUX_CH_USBC3:
659 	case AUX_CH_USBC4:
660 	case AUX_CH_USBC5:  /* aka AUX_CH_D_XELPD */
661 	case AUX_CH_USBC6:  /* aka AUX_CH_E_XELPD */
662 		return DP_AUX_CH_CTL(aux_ch);
663 	default:
664 		MISSING_CASE(aux_ch);
665 		return DP_AUX_CH_CTL(AUX_CH_A);
666 	}
667 }
668 
669 static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
670 {
671 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
672 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
673 	enum aux_ch aux_ch = dig_port->aux_ch;
674 
675 	switch (aux_ch) {
676 	case AUX_CH_A:
677 	case AUX_CH_B:
678 	case AUX_CH_C:
679 	case AUX_CH_USBC1:
680 	case AUX_CH_USBC2:
681 	case AUX_CH_USBC3:
682 	case AUX_CH_USBC4:
683 	case AUX_CH_USBC5:  /* aka AUX_CH_D_XELPD */
684 	case AUX_CH_USBC6:  /* aka AUX_CH_E_XELPD */
685 		return DP_AUX_CH_DATA(aux_ch, index);
686 	default:
687 		MISSING_CASE(aux_ch);
688 		return DP_AUX_CH_DATA(AUX_CH_A, index);
689 	}
690 }
691 
692 static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
693 {
694 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
695 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
696 	enum aux_ch aux_ch = dig_port->aux_ch;
697 
698 	switch (aux_ch) {
699 	case AUX_CH_A:
700 	case AUX_CH_B:
701 	case AUX_CH_USBC1:
702 	case AUX_CH_USBC2:
703 	case AUX_CH_USBC3:
704 	case AUX_CH_USBC4:
705 		return XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch);
706 	default:
707 		MISSING_CASE(aux_ch);
708 		return XELPDP_DP_AUX_CH_CTL(dev_priv, AUX_CH_A);
709 	}
710 }
711 
712 static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
713 {
714 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
715 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
716 	enum aux_ch aux_ch = dig_port->aux_ch;
717 
718 	switch (aux_ch) {
719 	case AUX_CH_A:
720 	case AUX_CH_B:
721 	case AUX_CH_USBC1:
722 	case AUX_CH_USBC2:
723 	case AUX_CH_USBC3:
724 	case AUX_CH_USBC4:
725 		return XELPDP_DP_AUX_CH_DATA(dev_priv, aux_ch, index);
726 	default:
727 		MISSING_CASE(aux_ch);
728 		return XELPDP_DP_AUX_CH_DATA(dev_priv, AUX_CH_A, index);
729 	}
730 }
731 
732 void intel_dp_aux_fini(struct intel_dp *intel_dp)
733 {
734 	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
735 		cpu_latency_qos_remove_request(&intel_dp->pm_qos);
736 
737 	kfree(intel_dp->aux.name);
738 }
739 
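/*
 * Hook up the per-platform register lookup and AUX control vfuncs and
 * initialize the drm_dp_aux helper for this port.
 */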
740 void intel_dp_aux_init(struct intel_dp *intel_dp)
741 {
742 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
743 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
744 	struct intel_encoder *encoder = &dig_port->base;
745 	enum aux_ch aux_ch = dig_port->aux_ch;
746 	char buf[AUX_CH_NAME_BUFSIZE];
747 
748 	if (DISPLAY_VER(dev_priv) >= 14) {
749 		intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
750 		intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
751 	} else if (DISPLAY_VER(dev_priv) >= 12) {
752 		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
753 		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
754 	} else if (DISPLAY_VER(dev_priv) >= 9) {
755 		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
756 		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
757 	} else if (HAS_PCH_SPLIT(dev_priv)) {
758 		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
759 		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
760 	} else {
761 		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
762 		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
763 	}
764 
765 	if (DISPLAY_VER(dev_priv) >= 9)
766 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
767 	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
768 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
769 	else if (HAS_PCH_SPLIT(dev_priv))
770 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
771 	else
772 		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
773 
774 	if (DISPLAY_VER(dev_priv) >= 9)
775 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
776 	else
777 		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
778 
779 	intel_dp->aux.drm_dev = &dev_priv->drm;
780 	drm_dp_aux_init(&intel_dp->aux);
781 
782 	/* Failure to allocate our preferred name is not critical */
783 	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
784 				       aux_ch_name(dev_priv, buf, sizeof(buf), aux_ch),
785 				       encoder->base.name);
786 
787 	intel_dp->aux.transfer = intel_dp_aux_transfer;
788 	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
789 }
790 
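/* By default the AUX channel matches the DDI port (port A -> AUX A, etc). */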
791 static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
792 {
793 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
794 
795 	/* SKL has DDI E but no AUX E */
796 	if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E)
797 		return AUX_CH_A;
798 
799 	return (enum aux_ch)encoder->port;
800 }
801 
802 static struct intel_encoder *
803 get_encoder_by_aux_ch(struct intel_encoder *encoder,
804 		      enum aux_ch aux_ch)
805 {
806 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
807 	struct intel_encoder *other;
808 
809 	for_each_intel_encoder(&i915->drm, other) {
810 		if (other == encoder)
811 			continue;
812 
813 		if (!intel_encoder_is_dig_port(other))
814 			continue;
815 
816 		if (enc_to_dig_port(other)->aux_ch == aux_ch)
817 			return other;
818 	}
819 
820 	return NULL;
821 }
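/*
 * Pick the AUX channel for an encoder: prefer the VBT-specified channel,
 * fall back to the platform default, and refuse a channel already
 * claimed by another encoder.
 */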
822 
823 enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
824 {
825 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
826 	struct intel_encoder *other;
827 	const char *source;
828 	enum aux_ch aux_ch;
829 	char buf[AUX_CH_NAME_BUFSIZE];
830 
831 	aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
832 	source = "VBT";
833 
834 	if (aux_ch == AUX_CH_NONE) {
835 		aux_ch = default_aux_ch(encoder);
836 		source = "platform default";
837 	}
838 
839 	if (aux_ch == AUX_CH_NONE)
840 		return AUX_CH_NONE;
841 
842 	/* FIXME validate aux_ch against platform caps */
843 
844 	other = get_encoder_by_aux_ch(encoder, aux_ch);
845 	if (other) {
846 		drm_dbg_kms(&i915->drm,
847 			    "[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
848 			    encoder->base.base.id, encoder->base.name,
849 			    aux_ch_name(i915, buf, sizeof(buf), aux_ch),
850 			    other->base.base.id, other->base.name);
851 		return AUX_CH_NONE;
852 	}
853 
854 	drm_dbg_kms(&i915->drm,
855 		    "[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
856 		    encoder->base.base.id, encoder->base.name,
857 		    aux_ch_name(i915, buf, sizeof(buf), aux_ch), source);
858 
859 	return aux_ch;
860 }
861 
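/* DP AUX interrupts share the GMBUS wait queue; wake up any waiters. */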
862 void intel_dp_aux_irq_handler(struct drm_i915_private *i915)
863 {
864 	wake_up_all(&i915->display.gmbus.wait_queue);
865 }
866