xref: /linux/drivers/gpu/drm/msm/dp/dp_aux.c (revision 222408cde4d0ab17e54d4db26751c2b5cab9ac2b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/delay.h>
7 #include <linux/phy/phy.h>
8 #include <drm/drm_print.h>
9 
10 #include "dp_reg.h"
11 #include "dp_aux.h"
12 
13 enum msm_dp_aux_err {
14 	DP_AUX_ERR_NONE,
15 	DP_AUX_ERR_ADDR,
16 	DP_AUX_ERR_TOUT,
17 	DP_AUX_ERR_NACK,
18 	DP_AUX_ERR_DEFER,
19 	DP_AUX_ERR_NACK_DEFER,
20 	DP_AUX_ERR_PHY,
21 };
22 
23 struct dp_aux_private {
24 	struct device *dev;
25 	struct dp_catalog *catalog;
26 
27 	struct phy *phy;
28 
29 	struct mutex mutex;
30 	struct completion comp;
31 
32 	enum msm_dp_aux_err aux_error_num;
33 	u32 retry_cnt;
34 	bool cmd_busy;
35 	bool native;
36 	bool read;
37 	bool no_send_addr;
38 	bool no_send_stop;
39 	bool initted;
40 	bool is_edp;
41 	u32 offset;	/* cached EDID read offset, snooped from i2c writes */
42 	u32 segment;	/* cached EDID segment pointer */
43 
44 	struct drm_dp_aux dp_aux;
45 };
46 
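/* Recalibrate the AUX PHY after this many consecutive failed native transfers */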
47 #define MAX_AUX_RETRIES			5
48 
49 static ssize_t dp_aux_write(struct dp_aux_private *aux,
50 			struct drm_dp_aux_msg *msg)
51 {
52 	u8 data[4];
53 	u32 reg;
54 	ssize_t len;
55 	u8 *msgdata = msg->buffer;
56 	int const AUX_CMD_FIFO_LEN = 128;
57 	int i = 0;
58 
59 	if (aux->read)
60 		len = 0;
61 	else
62 		len = msg->size;
63 
64 	/*
65 	 * The command FIFO is only 144 bytes deep, so limit the buffer
66 	 * length to 128 bytes here.
67 	 */
68 	if (len > AUX_CMD_FIFO_LEN - 4) {
69 		DRM_ERROR("buf size greater than allowed size of 128 bytes\n");
70 		return -EINVAL;
71 	}
72 
73 	/* Pack cmd and write to HW */
74 	data[0] = (msg->address >> 16) & 0xf;	/* addr[19:16] */
75 	if (aux->read)
76 		data[0] |=  BIT(4);		/* R/W */
77 
78 	data[1] = msg->address >> 8;		/* addr[15:8] */
79 	data[2] = msg->address;			/* addr[7:0] */
80 	data[3] = msg->size - 1;		/* len[7:0] */
81 
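	/*
	 * Push the 4-byte command header followed by the payload.  For a
	 * read, len is 0, so only the header is written and data[3]
	 * (msg->size - 1) tells the sink how many bytes to return.
	 */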
82 	for (i = 0; i < len + 4; i++) {
83 		reg = (i < 4) ? data[i] : msgdata[i - 4];
84 		reg <<= DP_AUX_DATA_OFFSET;
85 		reg &= DP_AUX_DATA_MASK;
86 		reg |= DP_AUX_DATA_WRITE;
87 		/* index = 0, write */
88 		if (i == 0)
89 			reg |= DP_AUX_DATA_INDEX_WRITE;
90 		aux->catalog->aux_data = reg;
91 		dp_catalog_aux_write_data(aux->catalog);
92 	}
93 
94 	dp_catalog_aux_clear_trans(aux->catalog, false);
95 	dp_catalog_aux_clear_hw_interrupts(aux->catalog);
96 
97 	reg = 0; /* Transaction number == 1 */
98 	if (!aux->native) { /* i2c */
99 		reg |= DP_AUX_TRANS_CTRL_I2C;
100 
101 		if (aux->no_send_addr)
102 			reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR;
103 
104 		if (aux->no_send_stop)
105 			reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP;
106 	}
107 
108 	reg |= DP_AUX_TRANS_CTRL_GO;
109 	aux->catalog->aux_data = reg;
110 	dp_catalog_aux_write_trans(aux->catalog);
111 
112 	return len;
113 }
114 
115 static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
116 			      struct drm_dp_aux_msg *msg)
117 {
118 	ssize_t ret;
119 	unsigned long time_left;
120 
121 	reinit_completion(&aux->comp);
122 
123 	ret = dp_aux_write(aux, msg);
124 	if (ret < 0)
125 		return ret;
126 
127 	time_left = wait_for_completion_timeout(&aux->comp,
128 						msecs_to_jiffies(250));
129 	if (!time_left)
130 		return -ETIMEDOUT;
131 
132 	return ret;
133 }
134 
135 static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
136 		struct drm_dp_aux_msg *msg)
137 {
138 	u32 data;
139 	u8 *dp;
140 	u32 i, actual_i;
141 	u32 len = msg->size;
142 
143 	dp_catalog_aux_clear_trans(aux->catalog, true);
144 
145 	data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
146 	data |= DP_AUX_DATA_READ;  /* read */
147 
148 	aux->catalog->aux_data = data;
149 	dp_catalog_aux_write_data(aux->catalog);
150 
151 	dp = msg->buffer;
152 
153 	/* discard first byte */
154 	data = dp_catalog_aux_read_data(aux->catalog);
155 
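	/*
	 * Each word popped from the read FIFO carries its index in the
	 * upper bits; stop as soon as the index no longer matches, i.e.
	 * the FIFO held fewer valid bytes than requested.
	 */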
156 	for (i = 0; i < len; i++) {
157 		data = dp_catalog_aux_read_data(aux->catalog);
158 		*dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
159 
160 		actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
161 		if (i != actual_i)
162 			break;
163 	}
164 
165 	return i;
166 }
167 
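/*
 * Snoop i2c writes to the EDID (0x50) and segment pointer (0x30)
 * addresses and cache the offset/segment so that
 * dp_aux_transfer_helper() can replay them before a read.
 */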
168 static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
169 					     struct drm_dp_aux_msg *input_msg)
170 {
171 	u32 edid_address = 0x50;
172 	u32 segment_address = 0x30;
173 	bool i2c_read = input_msg->request &
174 		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
175 	u8 *data;
176 
177 	if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
178 		(input_msg->address != segment_address)))
179 		return;
180 
182 	data = input_msg->buffer;
183 	if (input_msg->address == segment_address)
184 		aux->segment = *data;
185 	else
186 		aux->offset = *data;
187 }
188 
189 /**
190  * dp_aux_transfer_helper() - helper function for EDID read transactions
191  *
192  * @aux: DP AUX private structure
193  * @input_msg: input message from DRM upstream APIs
194  * @send_seg: send the segment to sink
195  *
196  * return: void
197  *
198  * This helper function is used to fix EDID reads for non-compliant
199  * sinks that do not handle the i2c middle-of-transaction flag correctly.
200  */
201 static void dp_aux_transfer_helper(struct dp_aux_private *aux,
202 				   struct drm_dp_aux_msg *input_msg,
203 				   bool send_seg)
204 {
205 	struct drm_dp_aux_msg helper_msg;
206 	u32 message_size = 0x10;
207 	u32 segment_address = 0x30;
208 	u32 const edid_block_length = 0x80;
209 	bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
210 	bool i2c_read = input_msg->request &
211 		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
212 
213 	if (!i2c_mot || !i2c_read || (input_msg->size == 0))
214 		return;
215 
216 	/*
217 	 * The upstream DRM EDID code already sends the segment value
218 	 * and EDID offset for each block.  Avoid duplicating those AUX
219 	 * transactions while reading the first 16 bytes of each
220 	 * block.
221 	 */
222 	if (!(aux->offset % edid_block_length) || !send_seg)
223 		goto end;
224 
225 	aux->read = false;
226 	aux->cmd_busy = true;
227 	aux->no_send_addr = true;
228 	aux->no_send_stop = true;
229 
230 	/*
231 	 * Send the segment address for every i2c read in which the
232 	 * middle-of-transaction flag is set. This is required to support EDID
233 	 * reads of more than 2 blocks as the segment address is reset to 0
234 	 * since we are overriding the middle-of-transaction flag for read
235 	 * transactions.
236 	 */
237 
238 	if (aux->segment) {
239 		memset(&helper_msg, 0, sizeof(helper_msg));
240 		helper_msg.address = segment_address;
241 		helper_msg.buffer = &aux->segment;
242 		helper_msg.size = 1;
243 		dp_aux_cmd_fifo_tx(aux, &helper_msg);
244 	}
245 
246 	/*
247 	 * Send the offset address for every i2c read in which the
248 	 * middle-of-transaction flag is set. This will ensure that the sink
249 	 * will update its read pointer and return the correct portion of the
250 	 * EDID buffer in the subsequent i2c read transaction triggered in the
251 	 * native AUX transfer function.
252 	 */
253 	memset(&helper_msg, 0, sizeof(helper_msg));
254 	helper_msg.address = input_msg->address;
255 	helper_msg.buffer = &aux->offset;
256 	helper_msg.size = 1;
257 	dp_aux_cmd_fifo_tx(aux, &helper_msg);
258 
259 end:
260 	aux->offset += message_size;
261 	if (aux->offset == 0x80 || aux->offset == 0x100)
262 		aux->segment = 0x0; /* reset segment at end of block */
263 }
264 
265 /*
266  * This function does the real work of processing an AUX transaction.
267  * It resets the AUX channel via dp_catalog_aux_reset() if the
268  * transfer fails or times out while the link is connected.
269  */
270 static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
271 			       struct drm_dp_aux_msg *msg)
272 {
273 	ssize_t ret;
274 	int const aux_cmd_native_max = 16;
275 	int const aux_cmd_i2c_max = 128;
276 	struct dp_aux_private *aux;
277 
278 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
279 
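	/*
	 * DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ (0x8 & 0x9) isolates
	 * the request bit that is set only for native AUX commands, so
	 * this tests native vs. i2c-over-AUX regardless of read/write.
	 */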
280 	aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
281 
282 	/* Ignore address-only messages */
283 	if (msg->size == 0 || !msg->buffer) {
284 		msg->reply = aux->native ?
285 			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
286 		return msg->size;
287 	}
288 
289 	/* msg sanity check */
290 	if ((aux->native && msg->size > aux_cmd_native_max) ||
291 	    msg->size > aux_cmd_i2c_max) {
292 		DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n",
293 			__func__, msg->size, msg->request);
294 		return -EINVAL;
295 	}
296 
297 	ret = pm_runtime_resume_and_get(dp_aux->dev);
298 	if (ret)
299 		return ret;
300 
301 	mutex_lock(&aux->mutex);
302 	if (!aux->initted) {
303 		ret = -EIO;
304 		goto exit;
305 	}
306 
307 	/*
308 	 * For eDP it's important to give a reasonably long wait here for HPD
309 	 * to be asserted. This is because the panel driver may have _just_
310 	 * turned on the panel and then tried to do an AUX transfer. The panel
311 	 * driver has no way of knowing when the panel is ready, so it's up
312 	 * to us to wait. For DP we never get into this situation so let's
313 	 * avoid ever doing the extra long wait for DP.
314 	 */
315 	if (aux->is_edp) {
316 		ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
317 		if (ret) {
318 			DRM_DEBUG_DP("Panel not ready for aux transactions\n");
319 			goto exit;
320 		}
321 	}
322 
323 	dp_aux_update_offset_and_segment(aux, msg);
324 	dp_aux_transfer_helper(aux, msg, true);
325 
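
	/*
	 * DP_AUX_I2C_READ & DP_AUX_NATIVE_READ (0x1 & 0x9) is the read
	 * bit shared by both native and i2c request codes.
	 */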
326 	aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
327 	aux->cmd_busy = true;
328 
329 	if (aux->read) {
330 		aux->no_send_addr = true;
331 		aux->no_send_stop = false;
332 	} else {
333 		aux->no_send_addr = true;
334 		aux->no_send_stop = true;
335 	}
336 
337 	ret = dp_aux_cmd_fifo_tx(aux, msg);
338 	if (ret < 0) {
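		/*
		 * Repeated failures of native transfers can indicate that
		 * the PHY needs re-tuning, so re-run phy_calibrate() every
		 * MAX_AUX_RETRIES consecutive failures.
		 */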
339 		if (aux->native) {
340 			aux->retry_cnt++;
341 			if (!(aux->retry_cnt % MAX_AUX_RETRIES))
342 				phy_calibrate(aux->phy);
343 		}
344 		/* reset aux if link is in connected state */
345 		if (dp_catalog_link_is_connected(aux->catalog))
346 			dp_catalog_aux_reset(aux->catalog);
347 	} else {
348 		aux->retry_cnt = 0;
349 		switch (aux->aux_error_num) {
350 		case DP_AUX_ERR_NONE:
351 			if (aux->read)
352 				ret = dp_aux_cmd_fifo_rx(aux, msg);
353 			msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
354 			break;
355 		case DP_AUX_ERR_DEFER:
356 			msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
357 			break;
358 		case DP_AUX_ERR_PHY:
359 		case DP_AUX_ERR_ADDR:
360 		case DP_AUX_ERR_NACK:
361 		case DP_AUX_ERR_NACK_DEFER:
362 			msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_NACK : DP_AUX_I2C_REPLY_NACK;
363 			break;
364 		case DP_AUX_ERR_TOUT:
365 			ret = -ETIMEDOUT;
366 			break;
367 		}
368 	}
369 
370 	aux->cmd_busy = false;
371 
372 exit:
373 	mutex_unlock(&aux->mutex);
374 	pm_runtime_put_sync(dp_aux->dev);
375 
376 	return ret;
377 }
378 
379 irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
380 {
381 	u32 isr;
382 	struct dp_aux_private *aux;
383 
384 	if (!dp_aux) {
385 		DRM_ERROR("invalid input\n");
386 		return IRQ_NONE;
387 	}
388 
389 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
390 
391 	isr = dp_catalog_aux_get_irq(aux->catalog);
392 
393 	/* no interrupts pending, return immediately */
394 	if (!isr)
395 		return IRQ_NONE;
396 
397 	if (!aux->cmd_busy) {
398 		DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr);
399 		return IRQ_NONE;
400 	}
401 
402 	/*
403 	 * The logic below assumes only one error bit is set (other than "done"
404 	 * which can apparently be set at the same time as some of the other
405  * bits). Warn if more than one is set so we know we need to improve
406 	 * the logic.
407 	 */
408 	if (hweight32(isr & ~DP_INTR_AUX_XFER_DONE) > 1)
409 		DRM_WARN("Some DP AUX interrupts unhandled: %#010x\n", isr);
410 
411 	if (isr & DP_INTR_AUX_ERROR) {
412 		aux->aux_error_num = DP_AUX_ERR_PHY;
413 		dp_catalog_aux_clear_hw_interrupts(aux->catalog);
414 	} else if (isr & DP_INTR_NACK_DEFER) {
415 		aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
416 	} else if (isr & DP_INTR_WRONG_ADDR) {
417 		aux->aux_error_num = DP_AUX_ERR_ADDR;
418 	} else if (isr & DP_INTR_TIMEOUT) {
419 		aux->aux_error_num = DP_AUX_ERR_TOUT;
420 	} else if (!aux->native && (isr & DP_INTR_I2C_NACK)) {
421 		aux->aux_error_num = DP_AUX_ERR_NACK;
422 	} else if (!aux->native && (isr & DP_INTR_I2C_DEFER)) {
423 		if (isr & DP_INTR_AUX_XFER_DONE)
424 			aux->aux_error_num = DP_AUX_ERR_NACK;
425 		else
426 			aux->aux_error_num = DP_AUX_ERR_DEFER;
427 	} else if (isr & DP_INTR_AUX_XFER_DONE) {
428 		aux->aux_error_num = DP_AUX_ERR_NONE;
429 	} else {
430 		DRM_WARN("Unexpected interrupt: %#010x\n", isr);
431 		return IRQ_NONE;
432 	}
433 
434 	complete(&aux->comp);
435 
436 	return IRQ_HANDLED;
437 }
438 
439 void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
440 {
441 	struct dp_aux_private *aux;
442 
443 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
444 
445 	phy_calibrate(aux->phy);
446 	dp_catalog_aux_reset(aux->catalog);
447 }
448 
449 void dp_aux_init(struct drm_dp_aux *dp_aux)
450 {
451 	struct dp_aux_private *aux;
452 
453 	if (!dp_aux) {
454 		DRM_ERROR("invalid input\n");
455 		return;
456 	}
457 
458 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
459 
460 	mutex_lock(&aux->mutex);
461 
462 	dp_catalog_aux_enable(aux->catalog, true);
463 	aux->retry_cnt = 0;
464 	aux->initted = true;
465 
466 	mutex_unlock(&aux->mutex);
467 }
468 
469 void dp_aux_deinit(struct drm_dp_aux *dp_aux)
470 {
471 	struct dp_aux_private *aux;
472 
473 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
474 
475 	mutex_lock(&aux->mutex);
476 
477 	aux->initted = false;
478 	dp_catalog_aux_enable(aux->catalog, false);
479 
480 	mutex_unlock(&aux->mutex);
481 }
482 
483 int dp_aux_register(struct drm_dp_aux *dp_aux)
484 {
485 	int ret;
486 
487 	if (!dp_aux) {
488 		DRM_ERROR("invalid input\n");
489 		return -EINVAL;
490 	}
491 
492 	ret = drm_dp_aux_register(dp_aux);
493 	if (ret) {
494 		DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
495 				ret);
496 		return ret;
497 	}
498 
499 	return 0;
500 }
501 
502 void dp_aux_unregister(struct drm_dp_aux *dp_aux)
503 {
504 	drm_dp_aux_unregister(dp_aux);
505 }
506 
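/*
 * Note: wait_us is not used here; HPD polling and its timeout are
 * handled inside dp_catalog_aux_wait_for_hpd_connect_state().
 */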
507 static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
508 				 unsigned long wait_us)
509 {
510 	int ret;
511 	struct dp_aux_private *aux;
512 
513 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
514 
515 	pm_runtime_get_sync(aux->dev);
516 	ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
517 	pm_runtime_put_sync(aux->dev);
518 
519 	return ret;
520 }
521 
522 struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
523 			      struct phy *phy,
524 			      bool is_edp)
525 {
526 	struct dp_aux_private *aux;
527 
528 	if (!catalog) {
529 		DRM_ERROR("invalid input\n");
530 		return ERR_PTR(-ENODEV);
531 	}
532 
533 	aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
534 	if (!aux)
535 		return ERR_PTR(-ENOMEM);
536 
537 	init_completion(&aux->comp);
538 	aux->cmd_busy = false;
539 	aux->is_edp = is_edp;
540 	mutex_init(&aux->mutex);
541 
542 	aux->dev = dev;
543 	aux->catalog = catalog;
544 	aux->phy = phy;
545 	aux->retry_cnt = 0;
546 
547 	/*
548 	 * Use drm_dp_aux_init() so the AUX adapter is usable before it
549 	 * is registered with the DRM device, which lets the msm eDP
550 	 * panel be detected by generic_edp_panel_probe().
551 	 */
552 	aux->dp_aux.name = "dpu_dp_aux";
553 	aux->dp_aux.dev = dev;
554 	aux->dp_aux.transfer = dp_aux_transfer;
555 	aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted;
556 	drm_dp_aux_init(&aux->dp_aux);
557 
558 	return &aux->dp_aux;
559 }
560 
561 void dp_aux_put(struct drm_dp_aux *dp_aux)
562 {
563 	struct dp_aux_private *aux;
564 
565 	if (!dp_aux)
566 		return;
567 
568 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
569 
570 	mutex_destroy(&aux->mutex);
571 
572 	devm_kfree(aux->dev, aux);
573 }
574