xref: /linux/drivers/gpu/drm/rockchip/cdn-dp-core.c (revision b8265621f4888af9494e1d685620871ec81bc33d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
4  * Author: Chris Zhong <zyw@rock-chips.com>
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/component.h>
9 #include <linux/extcon.h>
10 #include <linux/firmware.h>
11 #include <linux/mfd/syscon.h>
12 #include <linux/phy/phy.h>
13 #include <linux/regmap.h>
14 #include <linux/reset.h>
15 
16 #include <sound/hdmi-codec.h>
17 
18 #include <drm/drm_atomic_helper.h>
19 #include <drm/drm_dp_helper.h>
20 #include <drm/drm_edid.h>
21 #include <drm/drm_of.h>
22 #include <drm/drm_probe_helper.h>
23 #include <drm/drm_simple_kms_helper.h>
24 
25 #include "cdn-dp-core.h"
26 #include "cdn-dp-reg.h"
27 #include "rockchip_drm_vop.h"
28 
29 #define connector_to_dp(c) \
30 		container_of(c, struct cdn_dp_device, connector)
31 
32 #define encoder_to_dp(c) \
33 		container_of(c, struct cdn_dp_device, encoder)
34 
35 #define GRF_SOC_CON9		0x6224
36 #define DP_SEL_VOP_LIT		BIT(12)
37 #define GRF_SOC_CON26		0x6268
38 #define DPTX_HPD_SEL		(3 << 12)
39 #define DPTX_HPD_DEL		(2 << 12)
40 #define DPTX_HPD_SEL_MASK	(3 << 28)
41 
42 #define CDN_FW_TIMEOUT_MS	(64 * 1000)
43 #define CDN_DPCD_TIMEOUT_MS	5000
44 #define CDN_DP_FIRMWARE		"rockchip/dptx.bin"
45 
46 struct cdn_dp_data {
47 	u8 max_phy;
48 };
49 
50 static struct cdn_dp_data rk3399_cdn_dp = {
51 	.max_phy = 2,
52 };
53 
54 static const struct of_device_id cdn_dp_dt_ids[] = {
55 	{ .compatible = "rockchip,rk3399-cdn-dp",
56 		.data = (void *)&rk3399_cdn_dp },
57 	{}
58 };
59 
60 MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);
61 
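/*
 * The GRF clock must be running while its registers are accessed, so the
 * regmap write is wrapped in clk_prepare_enable()/clk_disable_unprepare().
 */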
62 static int cdn_dp_grf_write(struct cdn_dp_device *dp,
63 			    unsigned int reg, unsigned int val)
64 {
65 	int ret;
66 
67 	ret = clk_prepare_enable(dp->grf_clk);
68 	if (ret) {
69 		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
70 		return ret;
71 	}
72 
73 	ret = regmap_write(dp->grf, reg, val);
74 	if (ret) {
75 		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
76 		return ret;
77 	}
78 
79 	clk_disable_unprepare(dp->grf_clk);
80 
81 	return 0;
82 }
83 
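/*
 * Power up the controller: enable pclk and core_clk, take a runtime PM
 * reference, pulse the core/dptx/apb resets and pass the measured core_clk
 * rate to the firmware via cdn_dp_set_fw_clk().
 */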
84 static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
85 {
86 	int ret;
87 	unsigned long rate;
88 
89 	ret = clk_prepare_enable(dp->pclk);
90 	if (ret < 0) {
91 		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
92 		goto err_pclk;
93 	}
94 
95 	ret = clk_prepare_enable(dp->core_clk);
96 	if (ret < 0) {
97 		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
98 		goto err_core_clk;
99 	}
100 
101 	ret = pm_runtime_get_sync(dp->dev);
102 	if (ret < 0) {
103 		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		pm_runtime_put_noidle(dp->dev);
104 		goto err_pm_runtime_get;
105 	}
106 
107 	reset_control_assert(dp->core_rst);
108 	reset_control_assert(dp->dptx_rst);
109 	reset_control_assert(dp->apb_rst);
110 	reset_control_deassert(dp->core_rst);
111 	reset_control_deassert(dp->dptx_rst);
112 	reset_control_deassert(dp->apb_rst);
113 
114 	rate = clk_get_rate(dp->core_clk);
115 	if (!rate) {
116 		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
117 		ret = -EINVAL;
118 		goto err_set_rate;
119 	}
120 
121 	cdn_dp_set_fw_clk(dp, rate);
122 	cdn_dp_clock_reset(dp);
123 
124 	return 0;
125 
126 err_set_rate:
127 	pm_runtime_put(dp->dev);
128 err_pm_runtime_get:
129 	clk_disable_unprepare(dp->core_clk);
130 err_core_clk:
131 	clk_disable_unprepare(dp->pclk);
132 err_pclk:
133 	return ret;
134 }
135 
136 static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
137 {
138 	pm_runtime_put_sync(dp->dev);
139 	clk_disable_unprepare(dp->pclk);
140 	clk_disable_unprepare(dp->core_clk);
141 }
142 
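/*
 * Ask extcon how many lanes the port can give to DP: if DP is active and
 * SuperSpeed USB shares the connector, only 2 lanes are available; if USB
 * is not using the connector, all 4 lanes can be used; if DP is not active
 * the port provides no lanes.
 */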
143 static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
144 {
145 	struct extcon_dev *edev = port->extcon;
146 	union extcon_property_value property;
147 	int dptx;
148 	u8 lanes;
149 
150 	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
151 	if (dptx > 0) {
152 		extcon_get_property(edev, EXTCON_DISP_DP,
153 				    EXTCON_PROP_USB_SS, &property);
154 		if (property.intval)
155 			lanes = 2;
156 		else
157 			lanes = 4;
158 	} else {
159 		lanes = 0;
160 	}
161 
162 	return lanes;
163 }
164 
165 static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
166 {
167 	int ret;
168 	u8 value;
169 
170 	*sink_count = 0;
171 	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
172 	if (ret)
173 		return ret;
174 
175 	*sink_count = DP_GET_SINK_COUNT(value);
176 	return 0;
177 }
178 
179 static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
180 {
181 	struct cdn_dp_port *port;
182 	int i, lanes;
183 
184 	for (i = 0; i < dp->ports; i++) {
185 		port = dp->port[i];
186 		lanes = cdn_dp_get_port_lanes(port);
187 		if (lanes)
188 			return port;
189 	}
190 	return NULL;
191 }
192 
193 static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
194 {
195 	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
196 	struct cdn_dp_port *port;
197 	u8 sink_count = 0;
198 
199 	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
200 		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
201 		return false;
202 	}
203 
204 	port = dp->port[dp->active_port];
205 
206 	/*
207 	 * Attempt to read sink count, retry in case the sink may not be ready.
208 	 *
209 	 * Sinks are *supposed* to come up within 1ms from an off state, but
210 	 * some docks need more time to power up.
211 	 */
212 	while (time_before(jiffies, timeout)) {
213 		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
214 			return false;
215 
216 		if (!cdn_dp_get_sink_count(dp, &sink_count))
217 			return sink_count ? true : false;
218 
219 		usleep_range(5000, 10000);
220 	}
221 
222 	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
223 	return false;
224 }
225 
226 static enum drm_connector_status
227 cdn_dp_connector_detect(struct drm_connector *connector, bool force)
228 {
229 	struct cdn_dp_device *dp = connector_to_dp(connector);
230 	enum drm_connector_status status = connector_status_disconnected;
231 
232 	mutex_lock(&dp->lock);
233 	if (dp->connected)
234 		status = connector_status_connected;
235 	mutex_unlock(&dp->lock);
236 
237 	return status;
238 }
239 
240 static void cdn_dp_connector_destroy(struct drm_connector *connector)
241 {
242 	drm_connector_unregister(connector);
243 	drm_connector_cleanup(connector);
244 }
245 
246 static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
247 	.detect = cdn_dp_connector_detect,
248 	.destroy = cdn_dp_connector_destroy,
249 	.fill_modes = drm_helper_probe_single_connector_modes,
250 	.reset = drm_atomic_helper_connector_reset,
251 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
252 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
253 };
254 
255 static int cdn_dp_connector_get_modes(struct drm_connector *connector)
256 {
257 	struct cdn_dp_device *dp = connector_to_dp(connector);
258 	struct edid *edid;
259 	int ret = 0;
260 
261 	mutex_lock(&dp->lock);
262 	edid = dp->edid;
263 	if (edid) {
264 		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
265 				  edid->width_cm, edid->height_cm);
266 
267 		dp->sink_has_audio = drm_detect_monitor_audio(edid);
268 		ret = drm_add_edid_modes(connector, edid);
269 		if (ret)
270 			drm_connector_update_edid_property(connector,
271 								edid);
272 	}
273 	mutex_unlock(&dp->lock);
274 
275 	return ret;
276 }
277 
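/*
 * Filter modes by link bandwidth: the requested rate is the pixel clock
 * times bits per pixel, the available rate is the lesser of the source and
 * sink link rate times the lesser of the source and sink lane count,
 * derated to ~80% for 8b/10b coding. Both are in Mbit/s, e.g. a 594 MHz,
 * 24 bpp mode needs 14256, while 4 lanes at HBR2 (540000 in kHz) give
 * 540000 * 4 / 100 * 0.8 = 17280.
 */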
278 static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
279 				       struct drm_display_mode *mode)
280 {
281 	struct cdn_dp_device *dp = connector_to_dp(connector);
282 	struct drm_display_info *display_info = &dp->connector.display_info;
283 	u32 requested, actual, rate, sink_max, source_max = 0;
284 	u8 lanes, bpc;
285 
286 	/* If DP is disconnected, every mode is invalid */
287 	if (!dp->connected)
288 		return MODE_BAD;
289 
290 	switch (display_info->bpc) {
291 	case 10:
292 		bpc = 10;
293 		break;
294 	case 6:
295 		bpc = 6;
296 		break;
297 	default:
298 		bpc = 8;
299 		break;
300 	}
301 
302 	requested = mode->clock * bpc * 3 / 1000;
303 
304 	source_max = dp->lanes;
305 	sink_max = drm_dp_max_lane_count(dp->dpcd);
306 	lanes = min(source_max, sink_max);
307 
308 	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
309 	sink_max = drm_dp_max_link_rate(dp->dpcd);
310 	rate = min(source_max, sink_max);
311 
312 	actual = rate * lanes / 100;
313 
314 	/* efficiency is about 0.8 */
315 	actual = actual * 8 / 10;
316 
317 	if (requested > actual) {
318 		DRM_DEV_DEBUG_KMS(dp->dev,
319 				  "requested=%d, actual=%d, clock=%d\n",
320 				  requested, actual, mode->clock);
321 		return MODE_CLOCK_HIGH;
322 	}
323 
324 	return MODE_OK;
325 }
326 
327 static const struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
328 	.get_modes = cdn_dp_connector_get_modes,
329 	.mode_valid = cdn_dp_connector_mode_valid,
330 };
331 
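/*
 * Check the firmware image against its header, download the IRAM and DRAM
 * sections to the controller, start the embedded microcontroller and
 * configure which events the firmware reports back.
 */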
332 static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
333 {
334 	int ret;
335 	const u32 *iram_data, *dram_data;
336 	const struct firmware *fw = dp->fw;
337 	const struct cdn_firmware_header *hdr;
338 
339 	hdr = (struct cdn_firmware_header *)fw->data;
340 	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
341 		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
342 		return -EINVAL;
343 	}
344 
345 	iram_data = (const u32 *)(fw->data + hdr->header_size);
346 	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);
347 
348 	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
349 				   dram_data, hdr->dram_size);
350 	if (ret)
351 		return ret;
352 
353 	ret = cdn_dp_set_firmware_active(dp, true);
354 	if (ret) {
355 		DRM_DEV_ERROR(dp->dev, "activate uCPU failed: %d\n", ret);
356 		return ret;
357 	}
358 
359 	return cdn_dp_event_config(dp);
360 }
361 
362 static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
363 {
364 	int ret;
365 
366 	if (!cdn_dp_check_sink_connection(dp))
367 		return -ENODEV;
368 
369 	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
370 			       DP_RECEIVER_CAP_SIZE);
371 	if (ret) {
372 		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
373 		return ret;
374 	}
375 
376 	kfree(dp->edid);
377 	dp->edid = drm_do_get_edid(&dp->connector,
378 				   cdn_dp_get_edid_block, dp);
379 	return 0;
380 }
381 
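/*
 * Bring up one Type-C port for DP: power on its PHY, route HPD to the DP
 * controller through the GRF, make sure HPD is asserted, read the plug
 * orientation from extcon and program lane count and polarity into the
 * host capabilities.
 */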
382 static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
383 {
384 	union extcon_property_value property;
385 	int ret;
386 
387 	if (!port->phy_enabled) {
388 		ret = phy_power_on(port->phy);
389 		if (ret) {
390 			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
391 				      ret);
392 			goto err_phy;
393 		}
394 		port->phy_enabled = true;
395 	}
396 
397 	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
398 			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
399 	if (ret) {
400 		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
401 		goto err_power_on;
402 	}
403 
404 	ret = cdn_dp_get_hpd_status(dp);
405 	if (ret <= 0) {
406 		if (!ret)
407 			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
408 		goto err_power_on;
409 	}
410 
411 	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
412 				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
413 	if (ret) {
414 		DRM_DEV_ERROR(dp->dev, "get property failed\n");
415 		goto err_power_on;
416 	}
417 
418 	port->lanes = cdn_dp_get_port_lanes(port);
419 	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
420 	if (ret) {
421 		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
422 			      ret);
423 		goto err_power_on;
424 	}
425 
426 	dp->active_port = port->id;
427 	return 0;
428 
429 err_power_on:
430 	if (phy_power_off(port->phy))
431 		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
432 	else
433 		port->phy_enabled = false;
434 
435 err_phy:
436 	cdn_dp_grf_write(dp, GRF_SOC_CON26,
437 			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
438 	return ret;
439 }
440 
441 static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
442 			      struct cdn_dp_port *port)
443 {
444 	int ret;
445 
446 	if (port->phy_enabled) {
447 		ret = phy_power_off(port->phy);
448 		if (ret) {
449 			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
450 			return ret;
451 		}
452 	}
453 
454 	port->phy_enabled = false;
455 	port->lanes = 0;
456 	dp->active_port = -1;
457 	return 0;
458 }
459 
460 static int cdn_dp_disable(struct cdn_dp_device *dp)
461 {
462 	int ret, i;
463 
464 	if (!dp->active)
465 		return 0;
466 
467 	for (i = 0; i < dp->ports; i++)
468 		cdn_dp_disable_phy(dp, dp->port[i]);
469 
470 	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
471 			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
472 	if (ret) {
473 		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
474 			      ret);
475 		return ret;
476 	}
477 
478 	cdn_dp_set_firmware_active(dp, false);
479 	cdn_dp_clk_disable(dp);
480 	dp->active = false;
481 	dp->max_lanes = 0;
482 	dp->max_rate = 0;
483 	if (!dp->connected) {
484 		kfree(dp->edid);
485 		dp->edid = NULL;
486 	}
487 
488 	return 0;
489 }
490 
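/*
 * Power up the controller and firmware, then walk the ports starting at the
 * first one with a cable and keep the first port whose sink capabilities
 * can be read.
 */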
491 static int cdn_dp_enable(struct cdn_dp_device *dp)
492 {
493 	int ret, i, lanes;
494 	struct cdn_dp_port *port;
495 
496 	port = cdn_dp_connected_port(dp);
497 	if (!port) {
498 		DRM_DEV_ERROR(dp->dev,
499 			      "Can't enable without connection\n");
500 		return -ENODEV;
501 	}
502 
503 	if (dp->active)
504 		return 0;
505 
506 	ret = cdn_dp_clk_enable(dp);
507 	if (ret)
508 		return ret;
509 
510 	ret = cdn_dp_firmware_init(dp);
511 	if (ret) {
512 		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
513 		goto err_clk_disable;
514 	}
515 
516 	/* only enable the port that is connected to a downstream device */
517 	for (i = port->id; i < dp->ports; i++) {
518 		port = dp->port[i];
519 		lanes = cdn_dp_get_port_lanes(port);
520 		if (lanes) {
521 			ret = cdn_dp_enable_phy(dp, port);
522 			if (ret)
523 				continue;
524 
525 			ret = cdn_dp_get_sink_capability(dp);
526 			if (ret) {
527 				cdn_dp_disable_phy(dp, port);
528 			} else {
529 				dp->active = true;
530 				dp->lanes = port->lanes;
531 				return 0;
532 			}
533 		}
534 	}
535 
536 err_clk_disable:
537 	cdn_dp_clk_disable(dp);
538 	return ret;
539 }
540 
541 static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
542 				    struct drm_display_mode *mode,
543 				    struct drm_display_mode *adjusted)
544 {
545 	struct cdn_dp_device *dp = encoder_to_dp(encoder);
546 	struct drm_display_info *display_info = &dp->connector.display_info;
547 	struct video_info *video = &dp->video_info;
548 
549 	switch (display_info->bpc) {
550 	case 10:
551 		video->color_depth = 10;
552 		break;
553 	case 6:
554 		video->color_depth = 6;
555 		break;
556 	default:
557 		video->color_depth = 8;
558 		break;
559 	}
560 
561 	video->color_fmt = PXL_RGB;
562 	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
563 	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
564 
565 	drm_mode_copy(&dp->mode, adjusted);
566 }
567 
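/*
 * Read the DPCD lane status registers and check that channel equalization
 * still holds on the lanes in use; returns false if the link has not been
 * configured yet or needs to be re-trained.
 */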
568 static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
569 {
570 	u8 link_status[DP_LINK_STATUS_SIZE];
571 	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
572 	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);
573 
574 	if (!port || !dp->max_rate || !dp->max_lanes)
575 		return false;
576 
577 	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
578 			     DP_LINK_STATUS_SIZE)) {
579 		DRM_ERROR("Failed to get link status\n");
580 		return false;
581 	}
582 
583 	/* if link training is requested we should perform it always */
584 	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
585 }
586 
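/*
 * Route the selected VOP (big or little) to the DP controller through the
 * GRF, power everything up, (re)train the link if needed, then configure
 * the video stream and mark it valid.
 */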
587 static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
588 {
589 	struct cdn_dp_device *dp = encoder_to_dp(encoder);
590 	int ret, val;
591 
592 	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
593 	if (ret < 0) {
594 		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
595 		return;
596 	}
597 
598 	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
599 			  (ret) ? "LIT" : "BIG");
600 	if (ret)
601 		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
602 	else
603 		val = DP_SEL_VOP_LIT << 16;
604 
605 	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
606 	if (ret)
607 		return;
608 
609 	mutex_lock(&dp->lock);
610 
611 	ret = cdn_dp_enable(dp);
612 	if (ret) {
613 		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
614 			      ret);
615 		goto out;
616 	}
617 	if (!cdn_dp_check_link_status(dp)) {
618 		ret = cdn_dp_train_link(dp);
619 		if (ret) {
620 			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
621 			goto out;
622 		}
623 	}
624 
625 	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
626 	if (ret) {
627 		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
628 		goto out;
629 	}
630 
631 	ret = cdn_dp_config_video(dp);
632 	if (ret) {
633 		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
634 		goto out;
635 	}
636 
637 	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
638 	if (ret) {
639 		DRM_DEV_ERROR(dp->dev, "Failed to validate video %d\n", ret);
640 		goto out;
641 	}
642 out:
643 	mutex_unlock(&dp->lock);
644 }
645 
646 static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
647 {
648 	struct cdn_dp_device *dp = encoder_to_dp(encoder);
649 	int ret;
650 
651 	mutex_lock(&dp->lock);
652 	if (dp->active) {
653 		ret = cdn_dp_disable(dp);
654 		if (ret) {
655 			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
656 				      ret);
657 		}
658 	}
659 	mutex_unlock(&dp->lock);
660 
661 	/*
662 	 * In the following 2 cases, we need to run the event_work to re-enable
663 	 * the DP:
664 	 * 1. If more than one port has a device connected and one of them is
665 	 *    removed, the DP will be disabled here; run the event_work to
666 	 *    re-open the DP for the remaining port.
667 	 * 2. If re-training or re-config failed, the DP will be disabled here;
668 	 *    run the event_work to re-connect it.
669 	 */
670 	if (!dp->connected && cdn_dp_connected_port(dp))
671 		schedule_work(&dp->event_work);
672 }
673 
674 static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
675 				       struct drm_crtc_state *crtc_state,
676 				       struct drm_connector_state *conn_state)
677 {
678 	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
679 
680 	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
681 	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;
682 
683 	return 0;
684 }
685 
686 static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
687 	.mode_set = cdn_dp_encoder_mode_set,
688 	.enable = cdn_dp_encoder_enable,
689 	.disable = cdn_dp_encoder_disable,
690 	.atomic_check = cdn_dp_encoder_atomic_check,
691 };
692 
693 static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
694 {
695 	struct device *dev = dp->dev;
696 	struct device_node *np = dev->of_node;
697 	struct platform_device *pdev = to_platform_device(dev);
698 	struct resource *res;
699 
700 	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
701 	if (IS_ERR(dp->grf)) {
702 		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
703 		return PTR_ERR(dp->grf);
704 	}
705 
706 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
707 	dp->regs = devm_ioremap_resource(dev, res);
708 	if (IS_ERR(dp->regs)) {
709 		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
710 		return PTR_ERR(dp->regs);
711 	}
712 
713 	dp->core_clk = devm_clk_get(dev, "core-clk");
714 	if (IS_ERR(dp->core_clk)) {
715 		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
716 		return PTR_ERR(dp->core_clk);
717 	}
718 
719 	dp->pclk = devm_clk_get(dev, "pclk");
720 	if (IS_ERR(dp->pclk)) {
721 		DRM_DEV_ERROR(dev, "cannot get pclk\n");
722 		return PTR_ERR(dp->pclk);
723 	}
724 
725 	dp->spdif_clk = devm_clk_get(dev, "spdif");
726 	if (IS_ERR(dp->spdif_clk)) {
727 		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
728 		return PTR_ERR(dp->spdif_clk);
729 	}
730 
731 	dp->grf_clk = devm_clk_get(dev, "grf");
732 	if (IS_ERR(dp->grf_clk)) {
733 		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
734 		return PTR_ERR(dp->grf_clk);
735 	}
736 
737 	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
738 	if (IS_ERR(dp->spdif_rst)) {
739 		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
740 		return PTR_ERR(dp->spdif_rst);
741 	}
742 
743 	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
744 	if (IS_ERR(dp->dptx_rst)) {
745 		DRM_DEV_ERROR(dev, "no dptx reset control found\n");
746 		return PTR_ERR(dp->dptx_rst);
747 	}
748 
749 	dp->core_rst = devm_reset_control_get(dev, "core");
750 	if (IS_ERR(dp->core_rst)) {
751 		DRM_DEV_ERROR(dev, "no core reset control found\n");
752 		return PTR_ERR(dp->core_rst);
753 	}
754 
755 	dp->apb_rst = devm_reset_control_get(dev, "apb");
756 	if (IS_ERR(dp->apb_rst)) {
757 		DRM_DEV_ERROR(dev, "no apb reset control found\n");
758 		return PTR_ERR(dp->apb_rst);
759 	}
760 
761 	return 0;
762 }
763 
764 static int cdn_dp_audio_hw_params(struct device *dev,  void *data,
765 				  struct hdmi_codec_daifmt *daifmt,
766 				  struct hdmi_codec_params *params)
767 {
768 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
769 	struct audio_info audio = {
770 		.sample_width = params->sample_width,
771 		.sample_rate = params->sample_rate,
772 		.channels = params->channels,
773 	};
774 	int ret;
775 
776 	mutex_lock(&dp->lock);
777 	if (!dp->active) {
778 		ret = -ENODEV;
779 		goto out;
780 	}
781 
782 	switch (daifmt->fmt) {
783 	case HDMI_I2S:
784 		audio.format = AFMT_I2S;
785 		break;
786 	case HDMI_SPDIF:
787 		audio.format = AFMT_SPDIF;
788 		break;
789 	default:
790 		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
791 		ret = -EINVAL;
792 		goto out;
793 	}
794 
795 	ret = cdn_dp_audio_config(dp, &audio);
796 	if (!ret)
797 		dp->audio_info = audio;
798 
799 out:
800 	mutex_unlock(&dp->lock);
801 	return ret;
802 }
803 
804 static void cdn_dp_audio_shutdown(struct device *dev, void *data)
805 {
806 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
807 	int ret;
808 
809 	mutex_lock(&dp->lock);
810 	if (!dp->active)
811 		goto out;
812 
813 	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
814 	if (!ret)
815 		dp->audio_info.format = AFMT_UNUSED;
816 out:
817 	mutex_unlock(&dp->lock);
818 }
819 
820 static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
821 				     bool enable)
822 {
823 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
824 	int ret;
825 
826 	mutex_lock(&dp->lock);
827 	if (!dp->active) {
828 		ret = -ENODEV;
829 		goto out;
830 	}
831 
832 	ret = cdn_dp_audio_mute(dp, enable);
833 
834 out:
835 	mutex_unlock(&dp->lock);
836 	return ret;
837 }
838 
839 static int cdn_dp_audio_get_eld(struct device *dev, void *data,
840 				u8 *buf, size_t len)
841 {
842 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
843 
844 	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));
845 
846 	return 0;
847 }
848 
849 static const struct hdmi_codec_ops audio_codec_ops = {
850 	.hw_params = cdn_dp_audio_hw_params,
851 	.audio_shutdown = cdn_dp_audio_shutdown,
852 	.digital_mute = cdn_dp_audio_digital_mute,
853 	.get_eld = cdn_dp_audio_get_eld,
854 };
855 
856 static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
857 				   struct device *dev)
858 {
859 	struct hdmi_codec_pdata codec_data = {
860 		.i2s = 1,
861 		.spdif = 1,
862 		.ops = &audio_codec_ops,
863 		.max_i2s_channels = 8,
864 	};
865 
866 	dp->audio_pdev = platform_device_register_data(
867 			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
868 			 &codec_data, sizeof(codec_data));
869 
870 	return PTR_ERR_OR_ZERO(dp->audio_pdev);
871 }
872 
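/*
 * Request the firmware image, retrying with exponential backoff for up to
 * CDN_FW_TIMEOUT_MS while the file is still missing (-ENOENT), so that a
 * firmware file that only shows up later during boot can still be loaded.
 */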
873 static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
874 {
875 	int ret;
876 	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
877 	unsigned long sleep = 1000;
878 
879 	WARN_ON(!mutex_is_locked(&dp->lock));
880 
881 	if (dp->fw_loaded)
882 		return 0;
883 
884 	/* Drop the lock before getting the firmware to avoid blocking boot */
885 	mutex_unlock(&dp->lock);
886 
887 	while (time_before(jiffies, timeout)) {
888 		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
889 		if (ret == -ENOENT) {
890 			msleep(sleep);
891 			sleep *= 2;
892 			continue;
893 		} else if (ret) {
894 			DRM_DEV_ERROR(dp->dev,
895 				      "failed to request firmware: %d\n", ret);
896 			goto out;
897 		}
898 
899 		dp->fw_loaded = true;
900 		ret = 0;
901 		goto out;
902 	}
903 
904 	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
905 	ret = -ETIMEDOUT;
906 out:
907 	mutex_lock(&dp->lock);
908 	return ret;
909 }
910 
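/*
 * Deferred hotplug handler: make sure the firmware is loaded, work out the
 * new connection state (enable the block, re-train the link or update the
 * video config as needed), then signal a hotplug event to userspace if the
 * connector status changed.
 */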
911 static void cdn_dp_pd_event_work(struct work_struct *work)
912 {
913 	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
914 						event_work);
915 	struct drm_connector *connector = &dp->connector;
916 	enum drm_connector_status old_status;
917 
918 	int ret;
919 
920 	mutex_lock(&dp->lock);
921 
922 	if (dp->suspended)
923 		goto out;
924 
925 	ret = cdn_dp_request_firmware(dp);
926 	if (ret)
927 		goto out;
928 
929 	dp->connected = true;
930 
931 	/* Not connected, notify userspace to disable the block */
932 	if (!cdn_dp_connected_port(dp)) {
933 		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
934 		dp->connected = false;
935 
936 	/* Connected but not enabled, enable the block */
937 	} else if (!dp->active) {
938 		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
939 		ret = cdn_dp_enable(dp);
940 		if (ret) {
941 			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
942 			dp->connected = false;
943 		}
944 
945 	/* Enabled and connected to a dongle without a sink, notify userspace */
946 	} else if (!cdn_dp_check_sink_connection(dp)) {
947 		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
948 		dp->connected = false;
949 
950 	/* Enabled and connected with a sink, re-train if requested */
951 	} else if (!cdn_dp_check_link_status(dp)) {
952 		unsigned int rate = dp->max_rate;
953 		unsigned int lanes = dp->max_lanes;
954 		struct drm_display_mode *mode = &dp->mode;
955 
956 		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
957 		ret = cdn_dp_train_link(dp);
958 		if (ret) {
959 			dp->connected = false;
960 			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
961 			goto out;
962 		}
963 
964 		/* If training result is changed, update the video config */
965 		if (mode->clock &&
966 		    (rate != dp->max_rate || lanes != dp->max_lanes)) {
967 			ret = cdn_dp_config_video(dp);
968 			if (ret) {
969 				dp->connected = false;
970 				DRM_DEV_ERROR(dp->dev,
971 					      "Failed to config video %d\n",
972 					      ret);
973 			}
974 		}
975 	}
976 
977 out:
978 	mutex_unlock(&dp->lock);
979 
980 	old_status = connector->status;
981 	connector->status = connector->funcs->detect(connector, false);
982 	if (old_status != connector->status)
983 		drm_kms_helper_hotplug_event(dp->drm_dev);
984 }
985 
986 static int cdn_dp_pd_event(struct notifier_block *nb,
987 			   unsigned long event, void *priv)
988 {
989 	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
990 						event_nb);
991 	struct cdn_dp_device *dp = port->dp;
992 
993 	/*
994 	 * It would be nice to be able to just do the work inline right here.
995 	 * However, we need to make a bunch of calls that might sleep in order
996 	 * to turn on the block/phy, so use a worker instead.
997 	 */
998 	schedule_work(&dp->event_work);
999 
1000 	return NOTIFY_DONE;
1001 }
1002 
1003 static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
1004 {
1005 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
1006 	struct drm_encoder *encoder;
1007 	struct drm_connector *connector;
1008 	struct cdn_dp_port *port;
1009 	struct drm_device *drm_dev = data;
1010 	int ret, i;
1011 
1012 	ret = cdn_dp_parse_dt(dp);
1013 	if (ret < 0)
1014 		return ret;
1015 
1016 	dp->drm_dev = drm_dev;
1017 	dp->connected = false;
1018 	dp->active = false;
1019 	dp->active_port = -1;
1020 	dp->fw_loaded = false;
1021 
1022 	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);
1023 
1024 	encoder = &dp->encoder;
1025 
1026 	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
1027 							     dev->of_node);
1028 	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
1029 
1030 	ret = drm_simple_encoder_init(drm_dev, encoder,
1031 				      DRM_MODE_ENCODER_TMDS);
1032 	if (ret) {
1033 		DRM_ERROR("failed to initialize encoder with drm\n");
1034 		return ret;
1035 	}
1036 
1037 	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);
1038 
1039 	connector = &dp->connector;
1040 	connector->polled = DRM_CONNECTOR_POLL_HPD;
1041 	connector->dpms = DRM_MODE_DPMS_OFF;
1042 
1043 	ret = drm_connector_init(drm_dev, connector,
1044 				 &cdn_dp_atomic_connector_funcs,
1045 				 DRM_MODE_CONNECTOR_DisplayPort);
1046 	if (ret) {
1047 		DRM_ERROR("failed to initialize connector with drm\n");
1048 		goto err_free_encoder;
1049 	}
1050 
1051 	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
1052 
1053 	ret = drm_connector_attach_encoder(connector, encoder);
1054 	if (ret) {
1055 		DRM_ERROR("failed to attach connector and encoder\n");
1056 		goto err_free_connector;
1057 	}
1058 
1059 	for (i = 0; i < dp->ports; i++) {
1060 		port = dp->port[i];
1061 
1062 		port->event_nb.notifier_call = cdn_dp_pd_event;
1063 		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
1064 						    EXTCON_DISP_DP,
1065 						    &port->event_nb);
1066 		if (ret) {
1067 			DRM_DEV_ERROR(dev,
1068 				      "register EXTCON_DISP_DP notifier err\n");
1069 			goto err_free_connector;
1070 		}
1071 	}
1072 
1073 	pm_runtime_enable(dev);
1074 
1075 	schedule_work(&dp->event_work);
1076 
1077 	return 0;
1078 
1079 err_free_connector:
1080 	drm_connector_cleanup(connector);
1081 err_free_encoder:
1082 	drm_encoder_cleanup(encoder);
1083 	return ret;
1084 }
1085 
1086 static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
1087 {
1088 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
1089 	struct drm_encoder *encoder = &dp->encoder;
1090 	struct drm_connector *connector = &dp->connector;
1091 
1092 	cancel_work_sync(&dp->event_work);
1093 	cdn_dp_encoder_disable(encoder);
1094 	encoder->funcs->destroy(encoder);
1095 	connector->funcs->destroy(connector);
1096 
1097 	pm_runtime_disable(dev);
1098 	if (dp->fw_loaded)
1099 		release_firmware(dp->fw);
1100 	kfree(dp->edid);
1101 	dp->edid = NULL;
1102 }
1103 
1104 static const struct component_ops cdn_dp_component_ops = {
1105 	.bind = cdn_dp_bind,
1106 	.unbind = cdn_dp_unbind,
1107 };
1108 
1109 static int cdn_dp_suspend(struct device *dev)
1110 {
1111 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
1112 	int ret = 0;
1113 
1114 	mutex_lock(&dp->lock);
1115 	if (dp->active)
1116 		ret = cdn_dp_disable(dp);
1117 	dp->suspended = true;
1118 	mutex_unlock(&dp->lock);
1119 
1120 	return ret;
1121 }
1122 
1123 static int __maybe_unused cdn_dp_resume(struct device *dev)
1124 {
1125 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
1126 
1127 	mutex_lock(&dp->lock);
1128 	dp->suspended = false;
1129 	if (dp->fw_loaded)
1130 		schedule_work(&dp->event_work);
1131 	mutex_unlock(&dp->lock);
1132 
1133 	return 0;
1134 }
1135 
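/*
 * Look up the extcon device and PHY for each possible Type-C port,
 * deferring probe if either is not ready yet, register the HDMI audio
 * codec and hand the rest of the setup over to the component framework.
 */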
1136 static int cdn_dp_probe(struct platform_device *pdev)
1137 {
1138 	struct device *dev = &pdev->dev;
1139 	const struct of_device_id *match;
1140 	struct cdn_dp_data *dp_data;
1141 	struct cdn_dp_port *port;
1142 	struct cdn_dp_device *dp;
1143 	struct extcon_dev *extcon;
1144 	struct phy *phy;
1145 	int i;
1146 
1147 	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
1148 	if (!dp)
1149 		return -ENOMEM;
1150 	dp->dev = dev;
1151 
1152 	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
1153 	dp_data = (struct cdn_dp_data *)match->data;
1154 
1155 	for (i = 0; i < dp_data->max_phy; i++) {
1156 		extcon = extcon_get_edev_by_phandle(dev, i);
1157 		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);
1158 
1159 		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
1160 		    PTR_ERR(phy) == -EPROBE_DEFER)
1161 			return -EPROBE_DEFER;
1162 
1163 		if (IS_ERR(extcon) || IS_ERR(phy))
1164 			continue;
1165 
1166 		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
1167 		if (!port)
1168 			return -ENOMEM;
1169 
1170 		port->extcon = extcon;
1171 		port->phy = phy;
1172 		port->dp = dp;
1173 		port->id = i;
1174 		dp->port[dp->ports++] = port;
1175 	}
1176 
1177 	if (!dp->ports) {
1178 		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
1179 		return -EINVAL;
1180 	}
1181 
1182 	mutex_init(&dp->lock);
1183 	dev_set_drvdata(dev, dp);
1184 
1185 	cdn_dp_audio_codec_init(dp, dev);
1186 
1187 	return component_add(dev, &cdn_dp_component_ops);
1188 }
1189 
1190 static int cdn_dp_remove(struct platform_device *pdev)
1191 {
1192 	struct cdn_dp_device *dp = platform_get_drvdata(pdev);
1193 
1194 	platform_device_unregister(dp->audio_pdev);
1195 	cdn_dp_suspend(dp->dev);
1196 	component_del(&pdev->dev, &cdn_dp_component_ops);
1197 
1198 	return 0;
1199 }
1200 
1201 static void cdn_dp_shutdown(struct platform_device *pdev)
1202 {
1203 	struct cdn_dp_device *dp = platform_get_drvdata(pdev);
1204 
1205 	cdn_dp_suspend(dp->dev);
1206 }
1207 
1208 static const struct dev_pm_ops cdn_dp_pm_ops = {
1209 	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
1210 				cdn_dp_resume)
1211 };
1212 
1213 struct platform_driver cdn_dp_driver = {
1214 	.probe = cdn_dp_probe,
1215 	.remove = cdn_dp_remove,
1216 	.shutdown = cdn_dp_shutdown,
1217 	.driver = {
1218 		   .name = "cdn-dp",
1219 		   .owner = THIS_MODULE,
1220 		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
1221 		   .pm = &cdn_dp_pm_ops,
1222 	},
1223 };
1224