// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <sound/hdmi-codec.h>

#include <drm/drm_atomic_helper.h>
#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"
MODULE_FIRMWARE(CDN_DP_FIRMWARE);

struct cdn_dp_data {
	u8 max_phy;
};

struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
		.data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);

static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}

static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}

static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}
	return NULL;
}

static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);
		ret = drm_add_edid_modes(connector, edid);
		if (ret)
			drm_connector_update_edid_property(connector,
							   edid);
	}
	mutex_unlock(&dp->lock);

	return ret;
}

static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
				       struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.mode_valid = cdn_dp_connector_mode_valid,
};

static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

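	/* Per the offsets used below, the blob packs the header, then the IRAM image, then the DRAM image. */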
	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}

static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->max_lanes = 0;
	dp->max_rate = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}

static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

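	/* Nothing to enable unless at least one extcon port reports a DisplayPort connection. */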
	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* only enable the port that is connected to a downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}

static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	memcpy(&dp->mode, adjusted, sizeof(*mode));
}

static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->max_rate || !dp->max_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}

static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
"LIT" : "BIG"); 602 if (ret) 603 val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); 604 else 605 val = DP_SEL_VOP_LIT << 16; 606 607 ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); 608 if (ret) 609 return; 610 611 mutex_lock(&dp->lock); 612 613 ret = cdn_dp_enable(dp); 614 if (ret) { 615 DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n", 616 ret); 617 goto out; 618 } 619 if (!cdn_dp_check_link_status(dp)) { 620 ret = cdn_dp_train_link(dp); 621 if (ret) { 622 DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret); 623 goto out; 624 } 625 } 626 627 ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE); 628 if (ret) { 629 DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret); 630 goto out; 631 } 632 633 ret = cdn_dp_config_video(dp); 634 if (ret) { 635 DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret); 636 goto out; 637 } 638 639 ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID); 640 if (ret) { 641 DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret); 642 goto out; 643 } 644 out: 645 mutex_unlock(&dp->lock); 646 } 647 648 static void cdn_dp_encoder_disable(struct drm_encoder *encoder) 649 { 650 struct cdn_dp_device *dp = encoder_to_dp(encoder); 651 int ret; 652 653 mutex_lock(&dp->lock); 654 if (dp->active) { 655 ret = cdn_dp_disable(dp); 656 if (ret) { 657 DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n", 658 ret); 659 } 660 } 661 mutex_unlock(&dp->lock); 662 663 /* 664 * In the following 2 cases, we need to run the event_work to re-enable 665 * the DP: 666 * 1. If there is not just one port device is connected, and remove one 667 * device from a port, the DP will be disabled here, at this case, 668 * run the event_work to re-open DP for the other port. 669 * 2. If re-training or re-config failed, the DP will be disabled here. 670 * run the event_work to re-connect it. 
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}

static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	dp->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}

static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_mute_stream(struct device *dev, void *data,
				    bool enable, int direction)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.mute_stream = cdn_dp_audio_mute_stream,
	.get_eld = cdn_dp_audio_get_eld,
	.no_capture_mute = 1,
};

static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}

static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}

static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;

	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
Enabling cdn\n"); 940 ret = cdn_dp_enable(dp); 941 if (ret) { 942 DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret); 943 dp->connected = false; 944 } 945 946 /* Enabled and connected to a dongle without a sink, notify userspace */ 947 } else if (!cdn_dp_check_sink_connection(dp)) { 948 DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n"); 949 dp->connected = false; 950 951 /* Enabled and connected with a sink, re-train if requested */ 952 } else if (!cdn_dp_check_link_status(dp)) { 953 unsigned int rate = dp->max_rate; 954 unsigned int lanes = dp->max_lanes; 955 struct drm_display_mode *mode = &dp->mode; 956 957 DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n"); 958 ret = cdn_dp_train_link(dp); 959 if (ret) { 960 dp->connected = false; 961 DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret); 962 goto out; 963 } 964 965 /* If training result is changed, update the video config */ 966 if (mode->clock && 967 (rate != dp->max_rate || lanes != dp->max_lanes)) { 968 ret = cdn_dp_config_video(dp); 969 if (ret) { 970 dp->connected = false; 971 DRM_DEV_ERROR(dp->dev, 972 "Failed to config video %d\n", 973 ret); 974 } 975 } 976 } 977 978 out: 979 mutex_unlock(&dp->lock); 980 981 old_status = connector->status; 982 connector->status = connector->funcs->detect(connector, false); 983 if (old_status != connector->status) 984 drm_kms_helper_hotplug_event(dp->drm_dev); 985 } 986 987 static int cdn_dp_pd_event(struct notifier_block *nb, 988 unsigned long event, void *priv) 989 { 990 struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port, 991 event_nb); 992 struct cdn_dp_device *dp = port->dp; 993 994 /* 995 * It would be nice to be able to just do the work inline right here. 996 * However, we need to make a bunch of calls that might sleep in order 997 * to turn on the block/phy, so use a worker instead. 
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_simple_encoder_init(drm_dev, encoder,
				      DRM_MODE_ENCODER_TMDS);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};

static int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

static __maybe_unused int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
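	/* If firmware was loaded before suspend, re-run detection so the display comes back up. */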
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}

static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	return component_add(dev, &cdn_dp_component_ops);
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		.name = "cdn-dp",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(cdn_dp_dt_ids),
		.pm = &cdn_dp_pm_ops,
	},
};