1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020-2021 Intel Corporation
4 */
5
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_de.h"
9 #include "intel_display_types.h"
10 #include "intel_dp.h"
11 #include "intel_dp_aux.h"
12 #include "intel_dp_aux_regs.h"
13 #include "intel_pps.h"
14 #include "intel_quirks.h"
15 #include "intel_tc.h"
16 #include "intel_uncore_trace.h"
17
18 #define AUX_CH_NAME_BUFSIZE 6
19
aux_ch_name(struct intel_display * display,char * buf,int size,enum aux_ch aux_ch)20 static const char *aux_ch_name(struct intel_display *display,
21 char *buf, int size, enum aux_ch aux_ch)
22 {
23 if (DISPLAY_VER(display) >= 13 && aux_ch >= AUX_CH_D_XELPD)
24 snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
25 else if (DISPLAY_VER(display) >= 12 && aux_ch >= AUX_CH_USBC1)
26 snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
27 else
28 snprintf(buf, size, "%c", 'A' + aux_ch);
29
30 return buf;
31 }
32
intel_dp_aux_pack(const u8 * src,int src_bytes)33 u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
34 {
35 int i;
36 u32 v = 0;
37
38 if (src_bytes > 4)
39 src_bytes = 4;
40 for (i = 0; i < src_bytes; i++)
41 v |= ((u32)src[i]) << ((3 - i) * 8);
42 return v;
43 }
44
/*
 * Unpack a 32-bit AUX data register value into up to 4 bytes at @dst,
 * MSB first. The inverse of intel_dp_aux_pack().
 */
static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
{
	int count = dst_bytes > 4 ? 4 : dst_bytes;
	int byte;

	for (byte = 0; byte < count; byte++)
		dst[byte] = (u8)(src >> (24 - byte * 8));
}
54
/*
 * Wait for the AUX channel to finish the current transaction (SEND_BUSY to
 * clear) for up to 10ms and return the final control/status register value.
 * On timeout an error is logged but the (still-busy) status is returned
 * anyway; the caller inspects the status bits itself.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	int ret;

	/*
	 * Wait for SEND_BUSY == 0; the "2" is presumably the fast (atomic)
	 * poll window in us before sleeping -- TODO confirm against
	 * intel_de_wait_custom().
	 */
	ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY,
				   0,
				   2, timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(display->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);

	return status;
}
75
g4x_get_aux_clock_divider(struct intel_dp * intel_dp,int index)76 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
77 {
78 struct intel_display *display = to_intel_display(intel_dp);
79
80 if (index)
81 return 0;
82
83 /*
84 * The clock divider is based off the hrawclk, and would like to run at
85 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
86 */
87 return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 2000);
88 }
89
ilk_get_aux_clock_divider(struct intel_dp * intel_dp,int index)90 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
91 {
92 struct intel_display *display = to_intel_display(intel_dp);
93 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
94 u32 freq;
95
96 if (index)
97 return 0;
98
99 /*
100 * The clock divider is based off the cdclk or PCH rawclk, and would
101 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
102 * divide by 2000 and use that
103 */
104 if (dig_port->aux_ch == AUX_CH_A)
105 freq = display->cdclk.hw.cdclk;
106 else
107 freq = DISPLAY_RUNTIME_INFO(display)->rawclk_freq;
108 return DIV_ROUND_CLOSEST(freq, 2000);
109 }
110
hsw_get_aux_clock_divider(struct intel_dp * intel_dp,int index)111 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
112 {
113 struct intel_display *display = to_intel_display(intel_dp);
114 struct drm_i915_private *i915 = to_i915(display->drm);
115 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
116
117 if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
118 /* Workaround for non-ULT HSW */
119 switch (index) {
120 case 0: return 63;
121 case 1: return 72;
122 default: return 0;
123 }
124 }
125
126 return ilk_get_aux_clock_divider(intel_dp, index);
127 }
128
skl_get_aux_clock_divider(struct intel_dp * intel_dp,int index)129 static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
130 {
131 /*
132 * SKL doesn't need us to program the AUX clock divider (Hardware will
133 * derive the clock from CDCLK automatically). We still implement the
134 * get_aux_clock_divider vfunc to plug-in into the existing code.
135 */
136 return index ? 0 : 1;
137 }
138
/*
 * SYNC pulse count for regular AUX transactions: the longest allowed
 * precharge (spec range 10-16) plus 16 preamble pulses.
 */
static int intel_dp_aux_sync_len(void)
{
	const int precharge = 16; /* 10-16 */
	const int preamble = 16;

	return precharge + preamble;
}
146
intel_dp_aux_fw_sync_len(struct intel_dp * intel_dp)147 int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp)
148 {
149 int precharge = 10; /* 10-16 */
150 int preamble = 8;
151
152 /*
153 * We faced some glitches on Dell Precision 5490 MTL laptop with panel:
154 * "Manufacturer: AUO, Model: 63898" when using HW default 18. Using 20
155 * is fixing these problems with the panel. It is still within range
156 * mentioned in eDP specification. Increasing Fast Wake sync length is
157 * causing problems with other panels: increase length as a quirk for
158 * this specific laptop.
159 */
160 if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN))
161 precharge += 2;
162
163 return precharge + preamble;
164 }
165
/*
 * Extra precharge time beyond the hardware minimum, expressed in the 2us
 * units the G4x register field expects.
 */
static int g4x_dp_aux_precharge_len(void)
{
	const int precharge_min = 10;
	const int preamble = 16;
	int extra = intel_dp_aux_sync_len() - precharge_min - preamble;

	/* HW wants the length of the extra precharge in 2us units */
	return extra / 2;
}
175
/*
 * Build the AUX_CH_CTL value that starts a transaction on G4x-BDW style
 * hardware: busy/done/error reporting, the transaction timeout, the message
 * size and the precharge/bit-clock timing.
 */
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 timeout;

	/* Max timeout value on G4x-BDW: 1.6ms */
	timeout = IS_BROADWELL(i915) ? DP_AUX_CH_CTL_TIME_OUT_600us :
				       DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	       DP_AUX_CH_CTL_PRECHARGE_2US(g4x_dp_aux_precharge_len()) |
	       DP_AUX_CH_CTL_BIT_CLOCK_2X(aux_clock_divider);
}
200
/*
 * Build the AUX_CH_CTL value that starts a transaction on SKL+ hardware.
 * The clock divider argument is unused here since the hardware derives the
 * AUX clock itself; sync pulse counts are programmed explicitly instead.
 */
static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 ctl;

	/*
	 * Max timeout values:
	 * SKL-GLK: 1.6ms
	 * ICL+: 4ms
	 */
	ctl = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len(intel_dp)) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());

	/* Route over the Thunderbolt IO when the port is in TBT alt mode. */
	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		ctl |= DP_AUX_CH_CTL_TBT_IO;

	/*
	 * Power request bit is already set during aux power well enable.
	 * Preserve the bit across aux transactions.
	 */
	if (DISPLAY_VER(display) >= 14)
		ctl |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;

	return ctl;
}
236
/*
 * Perform one raw AUX transfer: load the request bytes into the data
 * registers, kick the transaction and retry per divider until the hardware
 * reports completion, then unload any reply bytes into @recv.
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -ENXIO for a disconnected (non-eDP) port, -EBUSY if the channel never went
 * idle / never completed (or the reply size is forbidden), -E2BIG for
 * oversized messages, -EIO on receive errors, -ETIMEDOUT on sink timeouts.
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	intel_digital_port_lock(encoder);
	/*
	 * Abort transfers on a disconnected port as required by
	 * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
	 * timeouts that would otherwise happen.
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !intel_digital_port_connected_locked(&dig_port->base)) {
		ret = -ENXIO;
		goto out_unlock;
	}

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/*
	 * FIXME PSR should be disabled here to prevent
	 * it using the same AUX CH simultaneously
	 */

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_de_read_notrace(display, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_de_read(display, ch_ctl);

		/* Warn only once per distinct stuck-busy value to limit spam. */
		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(display->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(display->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop: try each available AUX clock divider in turn. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_de_write(display, ch_data[i >> 2],
					       intel_dp_aux_pack(send + i,
								 send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_de_write(display, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_de_write(display, ch_ctl,
				       status | DP_AUX_CH_CTL_DONE |
				       DP_AUX_CH_CTL_TIME_OUT_ERROR |
				       DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(display->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error. Timeouts occur when the sink is
	 * not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(display->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're "normal"
	 * -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(display->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, status);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(display->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	/* Never write past the caller's buffer, whatever the sink claims. */
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_aux_unpack(intel_de_read(display, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	intel_pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);
out_unlock:
	intel_digital_port_unlock(encoder);

	return ret;
}
441
442 #define BARE_ADDRESS_SIZE 3
443 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
444
445 static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],const struct drm_dp_aux_msg * msg)446 intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
447 const struct drm_dp_aux_msg *msg)
448 {
449 txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
450 txbuf[1] = (msg->address >> 8) & 0xff;
451 txbuf[2] = msg->address & 0xff;
452 txbuf[3] = msg->size - 1;
453 }
454
intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg * msg)455 static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
456 {
457 /*
458 * If we're trying to send the HDCP Aksv, we need to set a the Aksv
459 * select bit to inform the hardware to send the Aksv after our header
460 * since we can't access that data from software.
461 */
462 if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
463 msg->address == DP_AUX_HDCP_AKSV)
464 return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
465
466 return 0;
467 }
468
/*
 * drm_dp_aux .transfer() hook: translate a drm_dp_aux_msg into a raw AUX
 * transfer via intel_dp_aux_xfer() and decode the reply.
 *
 * Returns the payload size on success (for writes, the number of bytes
 * accepted; for reads, the number of bytes received), or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct intel_display *display = to_intel_display(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* a zero-size write sends only the bare 3-byte address */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(display->drm, txsize > 20))
			return -E2BIG;

		/* buffer and size must be both set or both clear */
		drm_WARN_ON(display->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			/* reply code lives in the high nibble of byte 0 */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1; /* +1 for the reply byte */

		if (drm_WARN_ON(display->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
541
/* AUX CH control register on VLV/CHV; only channels B-D are handled. */
static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_CTL(aux_ch);
	default:
		/* unexpected channel: warn once and fall back to AUX B */
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_CTL(AUX_CH_B);
	}
}
557
/* AUX CH data register @index (0-4) on VLV/CHV; channels B-D only. */
static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* unexpected channel: warn once and fall back to AUX B */
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}
573
/* AUX CH control register on G4x; only channels B-D are handled. */
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		/* unexpected channel: warn once and fall back to AUX B */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}
589
/* AUX CH data register @index (0-4) on G4x; channels B-D only. */
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* unexpected channel: warn once and fall back to AUX B */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}
605
/*
 * AUX CH control register on ILK+: AUX A uses the CPU register range,
 * channels B-D live in the PCH register range.
 */
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		/* unexpected channel: warn once and fall back to AUX A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
623
/*
 * AUX CH data register @index (0-4) on ILK+: AUX A uses the CPU register
 * range, channels B-D live in the PCH register range.
 */
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* unexpected channel: warn once and fall back to AUX A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
641
/* AUX CH control register on SKL+; channels A-F are handled. */
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		/* unexpected channel: warn once and fall back to AUX A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
660
/* AUX CH data register @index (0-4) on SKL+; channels A-F are handled. */
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* unexpected channel: warn once and fall back to AUX A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
679
/*
 * AUX CH control register on TGL+; combo channels A-C plus type-C channels
 * USBC1-6 (USBC5/6 double as D/E on XE_LPD).
 */
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_CTL(aux_ch);
	default:
		/* unexpected channel: warn once and fall back to AUX A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
701
/*
 * AUX CH data register @index (0-4) on TGL+; combo channels A-C plus type-C
 * channels USBC1-6 (USBC5/6 double as D/E on XE_LPD).
 */
static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* unexpected channel: warn once and fall back to AUX A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
723
/*
 * AUX CH control register on XE_LPD+ (display 14+); channels A/B plus
 * USBC1-4. Register lookup needs @display since the macro takes it.
 */
static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_CTL(display, aux_ch);
	default:
		/* unexpected channel: warn once and fall back to AUX A */
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_CTL(display, AUX_CH_A);
	}
}
743
/*
 * AUX CH data register @index (0-4) on XE_LPD+ (display 14+); channels A/B
 * plus USBC1-4.
 */
static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_DATA(display, aux_ch, index);
	default:
		/* unexpected channel: warn once and fall back to AUX A */
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_DATA(display, AUX_CH_A, index);
	}
}
763
/* Tear down AUX state: drop the PM QoS request and free the AUX name. */
void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	/* guarded in case the QoS request was never added (or already removed) */
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}
771
/*
 * Set up the AUX channel for a DP encoder: pick the per-platform register
 * accessors and send-control/clock-divider hooks, register the drm_dp_aux
 * helper, name the channel and add the PM QoS request used to cap irq
 * latency during transfers. Paired with intel_dp_aux_fini().
 */
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	/* Register layout accessors: newest platforms first, G4x as fallback. */
	if (DISPLAY_VER(display) >= 14) {
		intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(i915)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	/* AUX clock divider selection, matching the register layout above. */
	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(i915))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	/* AUX_CH_CTL send-word builder: SKL+ vs. the older divider-based one. */
	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	intel_dp->aux.drm_dev = display->drm;
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
				       aux_ch_name(display, buf, sizeof(buf), aux_ch),
				       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
826
default_aux_ch(struct intel_encoder * encoder)827 static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
828 {
829 struct intel_display *display = to_intel_display(encoder);
830
831 /* SKL has DDI E but no AUX E */
832 if (DISPLAY_VER(display) == 9 && encoder->port == PORT_E)
833 return AUX_CH_A;
834
835 return (enum aux_ch)encoder->port;
836 }
837
838 static struct intel_encoder *
get_encoder_by_aux_ch(struct intel_encoder * encoder,enum aux_ch aux_ch)839 get_encoder_by_aux_ch(struct intel_encoder *encoder,
840 enum aux_ch aux_ch)
841 {
842 struct intel_display *display = to_intel_display(encoder);
843 struct intel_encoder *other;
844
845 for_each_intel_encoder(display->drm, other) {
846 if (other == encoder)
847 continue;
848
849 if (!intel_encoder_is_dig_port(other))
850 continue;
851
852 if (enc_to_dig_port(other)->aux_ch == aux_ch)
853 return other;
854 }
855
856 return NULL;
857 }
858
/*
 * Pick the AUX CH for @encoder: the VBT mapping wins over the platform
 * default. Returns AUX_CH_NONE if no channel is available or another
 * encoder already claimed the chosen one.
 */
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *other;
	const char *source;
	enum aux_ch aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
	source = "VBT";

	if (aux_ch == AUX_CH_NONE) {
		aux_ch = default_aux_ch(encoder);
		source = "platform default";
	}

	if (aux_ch == AUX_CH_NONE)
		return AUX_CH_NONE;

	/* FIXME validate aux_ch against platform caps */

	/* Refuse a channel another encoder already uses. */
	other = get_encoder_by_aux_ch(encoder, aux_ch);
	if (other) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name,
			    aux_ch_name(display, buf, sizeof(buf), aux_ch),
			    other->base.base.id, other->base.name);
		return AUX_CH_NONE;
	}

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
		    encoder->base.base.id, encoder->base.name,
		    aux_ch_name(display, buf, sizeof(buf), aux_ch), source);

	return aux_ch;
}
897
/* AUX interrupt handler: wake anyone sleeping on the shared gmbus wait queue. */
void intel_dp_aux_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->gmbus.wait_queue);
}
902