/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>

#include <sound/hdmi-codec.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"

struct cdn_dp_data {
	u8 max_phy;
};

struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
	  .data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);

static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}

static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}
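/*
 * Number of DP lanes available on @port: 4 when the Type-C connector is
 * dedicated to DisplayPort, 2 when USB SuperSpeed shares the connector,
 * and 0 when no DP device is attached.
 */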
static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}
	return NULL;
}
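/*
 * Wait for the sink to become readable over AUX and report whether at
 * least one sink device is attached. Gives up if the port is unplugged
 * or CDN_DPCD_TIMEOUT_MS expires.
 */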
static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);
		ret = drm_add_edid_modes(connector, edid);
		if (ret)
			drm_connector_update_edid_property(connector, edid);
	}
	mutex_unlock(&dp->lock);

	return ret;
}

static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
				       struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.mode_valid = cdn_dp_connector_mode_valid,
};
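/*
 * Validate the firmware image, load its IRAM and DRAM sections into the
 * controller, start the embedded microcontroller and enable event reporting.
 */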
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}
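/*
 * Power on the Type-C PHY for @port, route its HPD signal to the DP
 * controller through the GRF and program the host capabilities (lane
 * count and plug orientation) into the firmware.
 */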
static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->link.rate = 0;
	dp->link.num_lanes = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}

static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* only enable the port that is connected to a downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}

static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	memcpy(&dp->mode, adjusted, sizeof(*mode));
}

/*
 * Return true if the link is already trained and channel equalization still
 * holds for the active lane count, i.e. no (re-)training is needed.
 */
static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->link.rate || !dp->link.num_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}

static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&dp->lock);
}

static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following 2 cases, we need to run the event_work to re-enable
	 * the DP:
	 * 1. If more than one port device is connected and one of them is
	 *    removed, the DP is disabled here; run the event_work to re-open
	 *    DP for the remaining port.
	 * 2. If re-training or re-config failed, the DP is disabled here; run
	 *    the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}

static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}

static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
				     bool enable)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.digital_mute = cdn_dp_audio_digital_mute,
	.get_eld = cdn_dp_audio_get_eld,
};

static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}
/*
 * The firmware may not be reachable yet when this is first called (e.g.
 * the rootfs holding it is not mounted), so retry -ENOENT with exponential
 * backoff until CDN_FW_TIMEOUT_MS expires.
 */
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}

/*
 * Worker for extcon hot-plug events: load the firmware on first use, bring
 * the block up or down to match the current connection state, and send a
 * hotplug event to userspace when the connector status changes.
 */
static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;

	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->link.rate;
		unsigned int lanes = dp->link.num_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}

static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};
int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}

static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	return component_add(dev, &cdn_dp_component_ops);
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		   .name = "cdn-dp",
		   .owner = THIS_MODULE,
		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
		   .pm = &cdn_dp_pm_ops,
	},
};