// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <drm/drm_print.h>

#include "dp_reg.h"
#include "dp_aux.h"

enum msm_dp_aux_err {
	DP_AUX_ERR_NONE,
	DP_AUX_ERR_ADDR,
	DP_AUX_ERR_TOUT,
	DP_AUX_ERR_NACK,
	DP_AUX_ERR_DEFER,
	DP_AUX_ERR_NACK_DEFER,
	DP_AUX_ERR_PHY,
};

struct dp_aux_private {
	struct device *dev;
	struct dp_catalog *catalog;

	struct mutex mutex;
	struct completion comp;

	enum msm_dp_aux_err aux_error_num;
	u32 retry_cnt;
	bool cmd_busy;
	bool native;
	bool read;
	bool no_send_addr;
	bool no_send_stop;
	bool initted;
	bool is_edp;
	u32 offset;
	u32 segment;

	struct drm_dp_aux dp_aux;
};

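/*
 * After this many consecutive failed native transfers, update the AUX
 * hardware configuration via dp_catalog_aux_update_cfg().
 */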
#define MAX_AUX_RETRIES			5

static ssize_t dp_aux_write(struct dp_aux_private *aux,
			struct drm_dp_aux_msg *msg)
{
	u8 data[4];
	u32 reg;
	ssize_t len;
	u8 *msgdata = msg->buffer;
	int const AUX_CMD_FIFO_LEN = 128;
	int i = 0;

	if (aux->read)
		len = 0;
	else
		len = msg->size;

	/*
	 * cmd fifo only has depth of 144 bytes
	 * limit buf length to 128 bytes here
	 */
	if (len > AUX_CMD_FIFO_LEN - 4) {
		DRM_ERROR("buf size greater than allowed size of 128 bytes\n");
		return -EINVAL;
	}

	/* Pack cmd and write to HW */
	data[0] = (msg->address >> 16) & 0xf;	/* addr[19:16] */
	if (aux->read)
		data[0] |=  BIT(4);		/* R/W */

	data[1] = msg->address >> 8;		/* addr[15:8] */
	data[2] = msg->address;			/* addr[7:0] */
	data[3] = msg->size - 1;		/* len[7:0] */

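	/*
	 * Push the 4-byte command header followed by any payload into the
	 * AUX data FIFO; the first word also sets DP_AUX_DATA_INDEX_WRITE
	 * so writing starts at index 0 of the FIFO.
	 */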
	for (i = 0; i < len + 4; i++) {
		reg = (i < 4) ? data[i] : msgdata[i - 4];
		reg <<= DP_AUX_DATA_OFFSET;
		reg &= DP_AUX_DATA_MASK;
		reg |= DP_AUX_DATA_WRITE;
		/* index = 0, write */
		if (i == 0)
			reg |= DP_AUX_DATA_INDEX_WRITE;
		aux->catalog->aux_data = reg;
		dp_catalog_aux_write_data(aux->catalog);
	}

	dp_catalog_aux_clear_trans(aux->catalog, false);
	dp_catalog_aux_clear_hw_interrupts(aux->catalog);

	reg = 0; /* Transaction number == 1 */
	if (!aux->native) { /* i2c */
		reg |= DP_AUX_TRANS_CTRL_I2C;

		if (aux->no_send_addr)
			reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR;

		if (aux->no_send_stop)
			reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP;
	}

	reg |= DP_AUX_TRANS_CTRL_GO;
	aux->catalog->aux_data = reg;
	dp_catalog_aux_write_trans(aux->catalog);

	return len;
}

static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
			      struct drm_dp_aux_msg *msg)
{
	ssize_t ret;
	unsigned long time_left;

	reinit_completion(&aux->comp);

	ret = dp_aux_write(aux, msg);
	if (ret < 0)
		return ret;

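	/* dp_aux_isr() records the result in aux->aux_error_num and completes aux->comp */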
	time_left = wait_for_completion_timeout(&aux->comp,
						msecs_to_jiffies(250));
	if (!time_left)
		return -ETIMEDOUT;

	return ret;
}

static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
		struct drm_dp_aux_msg *msg)
{
	u32 data;
	u8 *dp;
	u32 i, actual_i;
	u32 len = msg->size;

	dp_catalog_aux_clear_trans(aux->catalog, true);

	data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
	data |= DP_AUX_DATA_READ;  /* read */

	aux->catalog->aux_data = data;
	dp_catalog_aux_write_data(aux->catalog);

	dp = msg->buffer;

	/* discard first byte */
	data = dp_catalog_aux_read_data(aux->catalog);

	for (i = 0; i < len; i++) {
		data = dp_catalog_aux_read_data(aux->catalog);
		*dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);

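		/* stop early if the FIFO's byte index no longer matches the expected index */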
		actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
		if (i != actual_i)
			break;
	}

	return i;
}

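/*
 * Cache the EDID segment and byte offset that the I2C-over-AUX master writes
 * to addresses 0x30 and 0x50, so that dp_aux_transfer_helper() can resend
 * them for sinks that mishandle the middle-of-transaction flag.
 */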
static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
					     struct drm_dp_aux_msg *input_msg)
{
	u32 edid_address = 0x50;
	u32 segment_address = 0x30;
	bool i2c_read = input_msg->request &
		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
	u8 *data;

	if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
		(input_msg->address != segment_address)))
		return;

	data = input_msg->buffer;
	if (input_msg->address == segment_address)
		aux->segment = *data;
	else
		aux->offset = *data;
}

/**
 * dp_aux_transfer_helper() - helper function for EDID read transactions
 *
 * @aux: DP AUX private structure
 * @input_msg: input message from DRM upstream APIs
 * @send_seg: send the segment to sink
 *
 * Return: void
 *
 * This helper function is used to fix EDID reads for non-compliant
 * sinks that do not handle the i2c middle-of-transaction flag correctly.
 */
static void dp_aux_transfer_helper(struct dp_aux_private *aux,
				   struct drm_dp_aux_msg *input_msg,
				   bool send_seg)
{
	struct drm_dp_aux_msg helper_msg;
	u32 message_size = 0x10;
	u32 segment_address = 0x30;
	u32 const edid_block_length = 0x80;
	bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
	bool i2c_read = input_msg->request &
		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);

	if (!i2c_mot || !i2c_read || (input_msg->size == 0))
		return;

	/*
	 * Sending the segment value and EDID offset will be performed
	 * from the DRM upstream EDID driver for each block. Avoid
	 * duplicate AUX transactions related to this while reading the
	 * first 16 bytes of each block.
	 */
	if (!(aux->offset % edid_block_length) || !send_seg)
		goto end;

	aux->read = false;
	aux->cmd_busy = true;
	aux->no_send_addr = true;
	aux->no_send_stop = true;

	/*
	 * Send the segment address for every i2c read in which the
	 * middle-of-transaction flag is set. This is required to support EDID
	 * reads of more than 2 blocks as the segment address is reset to 0
	 * since we are overriding the middle-of-transaction flag for read
	 * transactions.
	 */
	if (aux->segment) {
		memset(&helper_msg, 0, sizeof(helper_msg));
		helper_msg.address = segment_address;
		helper_msg.buffer = &aux->segment;
		helper_msg.size = 1;
		dp_aux_cmd_fifo_tx(aux, &helper_msg);
	}

	/*
	 * Send the offset address for every i2c read in which the
	 * middle-of-transaction flag is set. This ensures that the sink
	 * updates its read pointer and returns the correct portion of the
	 * EDID buffer in the subsequent i2c read transaction triggered in the
	 * native AUX transfer function.
	 */
	memset(&helper_msg, 0, sizeof(helper_msg));
	helper_msg.address = input_msg->address;
	helper_msg.buffer = &aux->offset;
	helper_msg.size = 1;
	dp_aux_cmd_fifo_tx(aux, &helper_msg);

end:
	aux->offset += message_size;
	if (aux->offset == 0x80 || aux->offset == 0x100)
		aux->segment = 0x0; /* reset segment at end of block */
}

/*
 * This function does the real job of processing an AUX transaction.
 * It resets the AUX channel via dp_catalog_aux_reset() if the transfer
 * times out or otherwise fails while the link is connected.
 */
static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
			       struct drm_dp_aux_msg *msg)
{
	ssize_t ret;
	int const aux_cmd_native_max = 16;
	int const aux_cmd_i2c_max = 128;
	struct dp_aux_private *aux;

	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);

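	/*
	 * The AND of the native write and native read request codes leaves
	 * only the request bit that distinguishes native transfers from i2c
	 * transfers.
	 */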
	aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);

	/* Ignore address only message */
	if (msg->size == 0 || !msg->buffer) {
		msg->reply = aux->native ?
			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
		return msg->size;
	}

	/* msg sanity check */
	if ((aux->native && msg->size > aux_cmd_native_max) ||
	    msg->size > aux_cmd_i2c_max) {
		DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n",
			__func__, msg->size, msg->request);
		return -EINVAL;
	}

	ret = pm_runtime_resume_and_get(dp_aux->dev);
	if (ret)
		return ret;

	mutex_lock(&aux->mutex);
	if (!aux->initted) {
		ret = -EIO;
		goto exit;
	}

	/*
	 * For eDP it's important to give a reasonably long wait here for HPD
	 * to be asserted. This is because the panel driver may have _just_
	 * turned on the panel and then tried to do an AUX transfer. The panel
	 * driver has no way of knowing when the panel is ready, so it's up
	 * to us to wait. For DP we never get into this situation so let's
	 * avoid ever doing the extra long wait for DP.
	 */
	if (aux->is_edp) {
		ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
		if (ret) {
			DRM_DEBUG_DP("Panel not ready for aux transactions\n");
			goto exit;
		}
	}

	dp_aux_update_offset_and_segment(aux, msg);
	dp_aux_transfer_helper(aux, msg, true);

	aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
	aux->cmd_busy = true;

	if (aux->read) {
		aux->no_send_addr = true;
		aux->no_send_stop = false;
	} else {
		aux->no_send_addr = true;
		aux->no_send_stop = true;
	}

	ret = dp_aux_cmd_fifo_tx(aux, msg);
	if (ret < 0) {
		if (aux->native) {
			aux->retry_cnt++;
			if (!(aux->retry_cnt % MAX_AUX_RETRIES))
				dp_catalog_aux_update_cfg(aux->catalog);
		}
		/* reset aux if link is in connected state */
		if (dp_catalog_link_is_connected(aux->catalog))
			dp_catalog_aux_reset(aux->catalog);
	} else {
		aux->retry_cnt = 0;
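		/* Map the error recorded by dp_aux_isr() onto a DRM AUX reply code */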
		switch (aux->aux_error_num) {
		case DP_AUX_ERR_NONE:
			if (aux->read)
				ret = dp_aux_cmd_fifo_rx(aux, msg);
			msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
			break;
		case DP_AUX_ERR_DEFER:
			msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
			break;
		case DP_AUX_ERR_PHY:
		case DP_AUX_ERR_ADDR:
		case DP_AUX_ERR_NACK:
		case DP_AUX_ERR_NACK_DEFER:
			msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_NACK : DP_AUX_I2C_REPLY_NACK;
			break;
		case DP_AUX_ERR_TOUT:
			ret = -ETIMEDOUT;
			break;
		}
	}

	aux->cmd_busy = false;

exit:
	mutex_unlock(&aux->mutex);
	pm_runtime_put_sync(dp_aux->dev);

	return ret;
}

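/*
 * Interrupt handler for the AUX channel: translate the status bits read from
 * the catalog into an msm_dp_aux_err and complete the transfer waiting in
 * dp_aux_cmd_fifo_tx().
 */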
irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
{
	u32 isr;
	struct dp_aux_private *aux;

	if (!dp_aux) {
		DRM_ERROR("invalid input\n");
		return IRQ_NONE;
	}

	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);

	isr = dp_catalog_aux_get_irq(aux->catalog);

	/* no interrupts pending, return immediately */
	if (!isr)
		return IRQ_NONE;

	if (!aux->cmd_busy) {
		DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr);
		return IRQ_NONE;
	}

	/*
	 * The logic below assumes only one error bit is set (other than "done"
	 * which can apparently be set at the same time as some of the other
	 * bits). Warn if more than one get set so we know we need to improve
	 * the logic.
	 */
	if (hweight32(isr & ~DP_INTR_AUX_XFER_DONE) > 1)
		DRM_WARN("Some DP AUX interrupts unhandled: %#010x\n", isr);

	if (isr & DP_INTR_AUX_ERROR) {
		aux->aux_error_num = DP_AUX_ERR_PHY;
		dp_catalog_aux_clear_hw_interrupts(aux->catalog);
	} else if (isr & DP_INTR_NACK_DEFER) {
		aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
	} else if (isr & DP_INTR_WRONG_ADDR) {
		aux->aux_error_num = DP_AUX_ERR_ADDR;
	} else if (isr & DP_INTR_TIMEOUT) {
		aux->aux_error_num = DP_AUX_ERR_TOUT;
	} else if (!aux->native && (isr & DP_INTR_I2C_NACK)) {
		aux->aux_error_num = DP_AUX_ERR_NACK;
	} else if (!aux->native && (isr & DP_INTR_I2C_DEFER)) {
		if (isr & DP_INTR_AUX_XFER_DONE)
			aux->aux_error_num = DP_AUX_ERR_NACK;
		else
			aux->aux_error_num = DP_AUX_ERR_DEFER;
	} else if (isr & DP_INTR_AUX_XFER_DONE) {
		aux->aux_error_num = DP_AUX_ERR_NONE;
	} else {
		DRM_WARN("Unexpected interrupt: %#010x\n", isr);
		return IRQ_NONE;
	}

	complete(&aux->comp);

	return IRQ_HANDLED;
}

void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
{
	struct dp_aux_private *aux;

	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);

	dp_catalog_aux_update_cfg(aux->catalog);
	dp_catalog_aux_reset(aux->catalog);
}

void dp_aux_init(struct drm_dp_aux *dp_aux)
{
	struct dp_aux_private *aux;

	if (!dp_aux) {
		DRM_ERROR("invalid input\n");
		return;
	}

	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);

	mutex_lock(&aux->mutex);

	dp_catalog_aux_enable(aux->catalog, true);
	aux->retry_cnt = 0;
	aux->initted = true;

	mutex_unlock(&aux->mutex);
}

void dp_aux_deinit(struct drm_dp_aux *dp_aux)
{
	struct dp_aux_private *aux;

	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);

	mutex_lock(&aux->mutex);

	aux->initted = false;
	dp_catalog_aux_enable(aux->catalog, false);

	mutex_unlock(&aux->mutex);
}

int dp_aux_register(struct drm_dp_aux *dp_aux)
{
	int ret;

	if (!dp_aux) {
		DRM_ERROR("invalid input\n");
		return -EINVAL;
	}

	ret = drm_dp_aux_register(dp_aux);
	if (ret) {
		DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
				ret);
		return ret;
	}

	return 0;
}

void dp_aux_unregister(struct drm_dp_aux *dp_aux)
{
	drm_dp_aux_unregister(dp_aux);
}

static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
				 unsigned long wait_us)
{
	int ret;
	struct dp_aux_private *aux;

	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);

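	/* wait_us is not used here; the catalog helper determines how long to wait */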
	pm_runtime_get_sync(aux->dev);
	ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
	pm_runtime_put_sync(aux->dev);

	return ret;
}

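/**
 * dp_aux_get() - allocate and initialise a DP AUX channel
 * @dev: device used for the devm allocation
 * @catalog: DP catalog providing register access for this controller
 * @is_edp: true if the channel drives an eDP panel (enables the HPD wait
 *	    in dp_aux_transfer())
 *
 * Return: pointer to the embedded struct drm_dp_aux on success, or an
 * ERR_PTR() on failure.
 */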
struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
			      bool is_edp)
{
	struct dp_aux_private *aux;

	if (!catalog) {
		DRM_ERROR("invalid input\n");
		return ERR_PTR(-ENODEV);
	}

	aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	init_completion(&aux->comp);
	aux->cmd_busy = false;
	aux->is_edp = is_edp;
	mutex_init(&aux->mutex);

	aux->dev = dev;
	aux->catalog = catalog;
	aux->retry_cnt = 0;

	/*
	 * Use drm_dp_aux_init() to set up the aux adapter
	 * before registering AUX with the DRM device so that
	 * the msm eDP panel can be detected by generic_edp_panel_probe().
	 */
	aux->dp_aux.name = "dpu_dp_aux";
	aux->dp_aux.dev = dev;
	aux->dp_aux.transfer = dp_aux_transfer;
	aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted;
	drm_dp_aux_init(&aux->dp_aux);

	return &aux->dp_aux;
}

void dp_aux_put(struct drm_dp_aux *dp_aux)
{
	struct dp_aux_private *aux;

	if (!dp_aux)
		return;

	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);

	mutex_destroy(&aux->mutex);

	devm_kfree(aux->dev, aux);
}
569