// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>

#include <video/mipi_display.h>

#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_of.h>

#include "dsi.h"
#include "dsi.xml.h"
#include "sfpb.xml.h"
#include "dsi_cfg.h"
#include "msm_dsc_helper.h"
#include "msm_kms.h"
#include "msm_gem.h"
#include "phy/dsi_phy.h"

#define DSI_RESET_TOGGLE_DELAY_MS 20

static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc);

static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
	u32 ver;

	if (!major || !minor)
		return -EINVAL;

	/*
	 * From DSI6G(v3), the addition of a 6G_HW_VERSION register at offset 0
	 * shifts all other registers down by 4 bytes.
	 *
	 * To tell DSI6G(v3) and newer apart from DSIv2 and older, we read the
	 * DSI_VERSION register without any shift (offset 0x1f0). On DSIv2 this
	 * has to be a non-zero value. On DSI6G it has to be zero (the offset
	 * points to a scratch register which we never touch).
	 */

	ver = readl(base + REG_DSI_VERSION);
	if (ver) {
		/* older dsi host, there is no register shift */
		ver = FIELD(ver, DSI_VERSION_MAJOR);
		if (ver <= MSM_DSI_VER_MAJOR_V2) {
			/* old versions */
			*major = ver;
			*minor = 0;
			return 0;
		} else {
			return -EINVAL;
		}
	} else {
		/*
		 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
		 * registers are shifted down, read DSI_VERSION again with
		 * the shifted offset
		 */
		ver = readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
		ver = FIELD(ver, DSI_VERSION_MAJOR);
		if (ver == MSM_DSI_VER_MAJOR_6G) {
			/* 6G version */
			*major = ver;
			*minor = readl(base + REG_DSI_6G_HW_VERSION);
			return 0;
		} else {
			return -EINVAL;
		}
	}
}
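/*
 * For illustration only (register values here are hypothetical): a DSIv2
 * host might return a word such as 0x02000000 at offset 0x1f0, yielding
 * major = 2, minor = 0. A DSI6G host reads 0 there, reports
 * MSM_DSI_VER_MAJOR_6G from the shifted DSI_VERSION, and a minor such as
 * 0x20000001 from 6G_HW_VERSION at offset 0.
 */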
#define DSI_ERR_STATE_ACK			0x0000
#define DSI_ERR_STATE_TIMEOUT			0x0001
#define DSI_ERR_STATE_DLN0_PHY			0x0002
#define DSI_ERR_STATE_FIFO			0x0004
#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW	0x0008
#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION	0x0010
#define DSI_ERR_STATE_PLL_UNLOCKED		0x0020

#define DSI_CLK_CTRL_ENABLE_CLKS \
		(DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
		DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
		DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
		DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)

struct msm_dsi_host {
	struct mipi_dsi_host base;

	struct platform_device *pdev;
	struct drm_device *dev;

	int id;

	void __iomem *ctrl_base;
	phys_addr_t ctrl_size;
	struct regulator_bulk_data *supplies;

	int num_bus_clks;
	struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX];

	struct clk *byte_clk;
	struct clk *esc_clk;
	struct clk *pixel_clk;
	struct clk *byte_intf_clk;

	unsigned long byte_clk_rate;
	unsigned long byte_intf_clk_rate;
	unsigned long pixel_clk_rate;
	unsigned long esc_clk_rate;

	/* DSI v2 specific clocks */
	struct clk *src_clk;

	unsigned long src_clk_rate;

	const struct msm_dsi_cfg_handler *cfg_hnd;

	struct completion dma_comp;
	struct completion video_comp;
	struct mutex dev_mutex;
	struct mutex cmd_mutex;
	spinlock_t intr_lock; /* Protect interrupt ctrl register */

	u32 err_work_state;
	struct work_struct err_work;
	struct workqueue_struct *workqueue;

	/* DSI 6G TX buffer */
	struct drm_gem_object *tx_gem_obj;
	struct msm_gem_address_space *aspace;

	/* DSI v2 TX buffer */
	void *tx_buf;
	dma_addr_t tx_buf_paddr;

	int tx_size;

	u8 *rx_buf;

	struct regmap *sfpb;

	struct drm_display_mode *mode;
	struct drm_dsc_config *dsc;

	/* connected device info */
	unsigned int channel;
	unsigned int lanes;
	enum mipi_dsi_pixel_format format;
	unsigned long mode_flags;

	/* lane data parsed via DT */
	int dlane_swap;
	int num_data_lanes;

	/* from phy DT */
	bool cphy_mode;

	u32 dma_cmd_ctrl_restore;

	bool registered;
	bool power_on;
	bool enabled;
	int irq;
};

static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
	return readl(msm_host->ctrl_base + reg);
}

static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
	writel(data, msm_host->ctrl_base + reg);
}

static const struct msm_dsi_cfg_handler *dsi_get_config(
						struct msm_dsi_host *msm_host)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
	struct device *dev = &msm_host->pdev->dev;
	struct clk *ahb_clk;
	int ret;
	u32 major = 0, minor = 0;

	ahb_clk = msm_clk_get(msm_host->pdev, "iface");
	if (IS_ERR(ahb_clk)) {
		pr_err("%s: cannot get interface clock\n", __func__);
		goto exit;
	}

	pm_runtime_get_sync(dev);

	ret = clk_prepare_enable(ahb_clk);
	if (ret) {
		pr_err("%s: unable to enable ahb_clk\n", __func__);
		goto runtime_put;
	}

	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
	if (ret) {
		pr_err("%s: Invalid version\n", __func__);
		goto disable_clks;
	}

	cfg_hnd = msm_dsi_cfg_get(major, minor);

	DBG("%s: Version %x:%x\n", __func__, major, minor);

disable_clks:
	clk_disable_unprepare(ahb_clk);
runtime_put:
	pm_runtime_put_sync(dev);
exit:
	return cfg_hnd;
}

static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
{
	return container_of(host, struct msm_dsi_host, base);
}

int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
{
	struct platform_device *pdev = msm_host->pdev;
	int ret = 0;

	msm_host->src_clk = msm_clk_get(pdev, "src");

	if (IS_ERR(msm_host->src_clk)) {
		ret = PTR_ERR(msm_host->src_clk);
		pr_err("%s: can't find src clock. ret=%d\n",
			__func__, ret);
		msm_host->src_clk = NULL;
		return ret;
	}

	return ret;
}

int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
{
	struct platform_device *pdev = msm_host->pdev;
	int ret = 0;

	msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
	if (IS_ERR(msm_host->byte_intf_clk)) {
		ret = PTR_ERR(msm_host->byte_intf_clk);
		pr_err("%s: can't find byte_intf clock. ret=%d\n",
			__func__, ret);
	}

	return ret;
}
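/*
 * A sketch, not a definition from this file: the per-version cfg handler
 * tables live in dsi_cfg.c, and a 6G v2 handler is expected to route the
 * optional clock setup above through .clk_init_ver, roughly along these
 * lines (field list abbreviated):
 *
 *	static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
 *		.link_clk_set_rate = dsi_link_clk_set_rate_6g,
 *		.link_clk_enable = dsi_link_clk_enable_6g,
 *		.link_clk_disable = dsi_link_clk_disable_6g,
 *		.clk_init_ver = dsi_clk_init_6g_v2,
 *		...
 *	};
 */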
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
	struct platform_device *pdev = msm_host->pdev;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	const struct msm_dsi_config *cfg = cfg_hnd->cfg;
	int i, ret = 0;

	/* get bus clocks */
	for (i = 0; i < cfg->num_bus_clks; i++)
		msm_host->bus_clks[i].id = cfg->bus_clk_names[i];
	msm_host->num_bus_clks = cfg->num_bus_clks;

	ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret);
		goto exit;
	}

	/* get link and source clocks */
	msm_host->byte_clk = msm_clk_get(pdev, "byte");
	if (IS_ERR(msm_host->byte_clk)) {
		ret = PTR_ERR(msm_host->byte_clk);
		pr_err("%s: can't find dsi_byte clock. ret=%d\n",
			__func__, ret);
		msm_host->byte_clk = NULL;
		goto exit;
	}

	msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
	if (IS_ERR(msm_host->pixel_clk)) {
		ret = PTR_ERR(msm_host->pixel_clk);
		pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
			__func__, ret);
		msm_host->pixel_clk = NULL;
		goto exit;
	}

	msm_host->esc_clk = msm_clk_get(pdev, "core");
	if (IS_ERR(msm_host->esc_clk)) {
		ret = PTR_ERR(msm_host->esc_clk);
		pr_err("%s: can't find dsi_esc clock. ret=%d\n",
			__func__, ret);
		msm_host->esc_clk = NULL;
		goto exit;
	}

	if (cfg_hnd->ops->clk_init_ver)
		ret = cfg_hnd->ops->clk_init_ver(msm_host);
exit:
	return ret;
}

int msm_dsi_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
	struct mipi_dsi_host *host = msm_dsi->host;
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	if (!msm_host->cfg_hnd)
		return 0;

	clk_bulk_disable_unprepare(msm_host->num_bus_clks, msm_host->bus_clks);

	return 0;
}

int msm_dsi_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
	struct mipi_dsi_host *host = msm_dsi->host;
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	if (!msm_host->cfg_hnd)
		return 0;

	return clk_bulk_prepare_enable(msm_host->num_bus_clks, msm_host->bus_clks);
}

int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%lu, byteclk=%lu",
	    msm_host->pixel_clk_rate, msm_host->byte_clk_rate);

	ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
				  msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: dev_pm_opp_set_rate failed %d\n", __func__, ret);
		return ret;
	}

	ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		return ret;
	}

	if (msm_host->byte_intf_clk) {
		ret = clk_set_rate(msm_host->byte_intf_clk, msm_host->byte_intf_clk_rate);
		if (ret) {
			pr_err("%s: Failed to set rate byte intf clk, %d\n",
			       __func__, ret);
			return ret;
		}
	}

	return 0;
}

int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
	int ret;

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto error;
	}
	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	ret = clk_prepare_enable(msm_host->byte_intf_clk);
	if (ret) {
		pr_err("%s: Failed to enable byte intf clk\n",
			__func__);
		goto byte_intf_clk_err;
	}

	return 0;

byte_intf_clk_err:
	clk_disable_unprepare(msm_host->pixel_clk);
pixel_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
error:
	return ret;
}

int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%lu, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
	    msm_host->pixel_clk_rate, msm_host->byte_clk_rate,
	    msm_host->esc_clk_rate, msm_host->src_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		return ret;
	}

	ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
		return ret;
	}

	ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
		return ret;
	}

	ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		return ret;
	}

	return 0;
}

int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
	int ret;

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto esc_clk_err;
	}

	ret = clk_prepare_enable(msm_host->src_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi src clk\n", __func__);
		goto src_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

pixel_clk_err:
	clk_disable_unprepare(msm_host->src_clk);
src_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
esc_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
error:
	return ret;
}

void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
{
	/* Drop the performance state vote */
	dev_pm_opp_set_rate(&msm_host->pdev->dev, 0);
	clk_disable_unprepare(msm_host->esc_clk);
	clk_disable_unprepare(msm_host->pixel_clk);
	clk_disable_unprepare(msm_host->byte_intf_clk);
	clk_disable_unprepare(msm_host->byte_clk);
}

void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
{
	clk_disable_unprepare(msm_host->pixel_clk);
	clk_disable_unprepare(msm_host->src_clk);
	clk_disable_unprepare(msm_host->esc_clk);
	clk_disable_unprepare(msm_host->byte_clk);
}

/**
 * dsi_adjust_pclk_for_compression() - Adjust the pclk rate for compression case
 * @mode: The selected mode for the DSI output
 * @dsc: DRM DSC configuration for this DSI output
 *
 * Adjust the pclk rate by calculating a new hdisplay proportional to
 * the compression ratio such that:
 *     new_hdisplay = old_hdisplay * compressed_bpp / uncompressed_bpp
 *
 * Porches do not need to be adjusted:
 * - For VIDEO mode they are not compressed by DSC and are passed as is.
 * - For CMD mode there are no actual porches. Instead these fields
 *   currently represent the overhead to the image data transfer. As such, they
 *   are calculated for the final mode parameters (after the compression) and
 *   are not to be adjusted either.
 *
 * FIXME: Reconsider this if/when CMD mode handling is rewritten to use
 * transfer time and data overhead as a starting point of the calculations.
 */
static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode,
		const struct drm_dsc_config *dsc)
{
	int new_hdisplay = DIV_ROUND_UP(mode->hdisplay * drm_dsc_get_bpp_int(dsc),
			dsc->bits_per_component * 3);

	int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;

	return new_htotal * mode->vtotal * drm_mode_vrefresh(mode);
}
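/*
 * Worked example (hypothetical numbers): with hdisplay = 1080, an integer
 * DSC bits_per_pixel of 8 and bits_per_component = 8, the active line
 * shrinks to DIV_ROUND_UP(1080 * 8, 24) = 360 pclk cycles, so htotal
 * shrinks by the same 720-cycle difference while the porches and sync
 * width are carried over unchanged.
 */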
static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
		const struct drm_dsc_config *dsc, bool is_bonded_dsi)
{
	unsigned long pclk_rate;

	pclk_rate = mode->clock * 1000;

	if (dsc)
		pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);

	/*
	 * For bonded DSI mode, the current DRM mode has the complete width of the
	 * panel. Since the complete panel is driven by two DSI controllers,
	 * the clock rates have to be split between the two dsi controllers.
	 * Adjust the byte and pixel clock rates for each dsi host accordingly.
	 */
	if (is_bonded_dsi)
		pclk_rate /= 2;

	return pclk_rate;
}

unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_dsi,
				    const struct drm_display_mode *mode)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	u8 lanes = msm_host->lanes;
	u32 bpp = mipi_dsi_pixel_format_to_bpp(msm_host->format);
	unsigned long pclk_rate = dsi_get_pclk_rate(mode, msm_host->dsc, is_bonded_dsi);
	unsigned long pclk_bpp;

	if (lanes == 0) {
		pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
		lanes = 1;
	}

	/* CPHY "byte_clk" is in units of 16 bits */
	if (msm_host->cphy_mode)
		pclk_bpp = mult_frac(pclk_rate, bpp, 16 * lanes);
	else
		pclk_bpp = mult_frac(pclk_rate, bpp, 8 * lanes);

	return pclk_bpp;
}
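/*
 * Worked example (hypothetical numbers): RGB888 (bpp = 24) over 4 D-PHY
 * lanes at pclk = 150 MHz gives byte_clk = 150 MHz * 24 / (8 * 4) =
 * 112.5 MHz; in C-PHY mode the divisor becomes 16 * lanes instead.
 */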
static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
	msm_host->pixel_clk_rate = dsi_get_pclk_rate(msm_host->mode, msm_host->dsc, is_bonded_dsi);
	msm_host->byte_clk_rate = dsi_byte_clk_get_rate(&msm_host->base, is_bonded_dsi,
							msm_host->mode);

	DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
	    msm_host->byte_clk_rate);
}

int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
	if (!msm_host->mode) {
		pr_err("%s: mode not set\n", __func__);
		return -EINVAL;
	}

	dsi_calc_pclk(msm_host, is_bonded_dsi);
	msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
	return 0;
}

int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
	u32 bpp = mipi_dsi_pixel_format_to_bpp(msm_host->format);
	unsigned int esc_mhz, esc_div;
	unsigned long byte_mhz;

	dsi_calc_pclk(msm_host, is_bonded_dsi);

	msm_host->src_clk_rate = mult_frac(msm_host->pixel_clk_rate, bpp, 8);

	/*
	 * The esc clock is the byte clock followed by a 4-bit divider. We need
	 * an escape clock frequency that is within the MIPI DSI spec range and
	 * reachable within the divider's limits, so iterate over escape clock
	 * frequencies from 20 MHz down to 5 MHz and pick the first one our
	 * divider can produce.
	 */

	byte_mhz = msm_host->byte_clk_rate / 1000000;

	for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
		esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);

		/*
		 * TODO: Ideally, we shouldn't know what sort of divider
		 * is available in mmss_cc, we're just assuming that
		 * it'll always be a 4 bit divider. Need to come up with
		 * a better way here.
		 */
		if (esc_div >= 1 && esc_div <= 16)
			break;
	}

	if (esc_mhz < 5)
		return -EINVAL;

	msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;

	DBG("esc=%lu, src=%lu", msm_host->esc_clk_rate,
	    msm_host->src_clk_rate);

	return 0;
}
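/*
 * Worked example (hypothetical byte clock): byte_clk = 53 MHz tries
 * esc = 20 MHz first, giving esc_div = DIV_ROUND_UP(53, 20) = 3, which
 * fits the 4-bit divider, so esc_clk_rate = 53 MHz / 3, roughly 17.7 MHz.
 */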
static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
{
	u32 intr;
	unsigned long flags;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);

	if (enable)
		intr |= mask;
	else
		intr &= ~mask;

	DBG("intr=%x enable=%d", intr, enable);

	dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
}

static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
{
	if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
		return BURST_MODE;
	else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		return NON_BURST_SYNCH_PULSE;

	return NON_BURST_SYNCH_EVENT;
}

static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
				const enum mipi_dsi_pixel_format mipi_fmt)
{
	switch (mipi_fmt) {
	case MIPI_DSI_FMT_RGB888:		return VID_DST_FORMAT_RGB888;
	case MIPI_DSI_FMT_RGB666:		return VID_DST_FORMAT_RGB666_LOOSE;
	case MIPI_DSI_FMT_RGB666_PACKED:	return VID_DST_FORMAT_RGB666;
	case MIPI_DSI_FMT_RGB565:		return VID_DST_FORMAT_RGB565;
	default:				return VID_DST_FORMAT_RGB888;
	}
}

static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
				const enum mipi_dsi_pixel_format mipi_fmt)
{
	switch (mipi_fmt) {
	case MIPI_DSI_FMT_RGB888:		return CMD_DST_FORMAT_RGB888;
	case MIPI_DSI_FMT_RGB666_PACKED:
	case MIPI_DSI_FMT_RGB666:		return CMD_DST_FORMAT_RGB666;
	case MIPI_DSI_FMT_RGB565:		return CMD_DST_FORMAT_RGB565;
	default:				return CMD_DST_FORMAT_RGB888;
	}
}

static void dsi_ctrl_disable(struct msm_dsi_host *msm_host)
{
	dsi_write(msm_host, REG_DSI_CTRL, 0);
}

bool msm_dsi_host_is_wide_bus_enabled(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	return msm_host->dsc &&
		(msm_host->cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
		 msm_host->cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_5_0);
}

static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
			struct msm_dsi_phy_shared_timings *phy_shared_timings,
			struct msm_dsi_phy *phy)
{
	u32 flags = msm_host->mode_flags;
	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	u32 data = 0, lane_ctrl = 0;

	if (flags & MIPI_DSI_MODE_VIDEO) {
		if (flags & MIPI_DSI_MODE_VIDEO_HSE)
			data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
		if (flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
			data |= DSI_VID_CFG0_HFP_POWER_STOP;
		if (flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
			data |= DSI_VID_CFG0_HBP_POWER_STOP;
		if (flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
			data |= DSI_VID_CFG0_HSA_POWER_STOP;
		/*
		 * Always set low power stop mode for BLLP
		 * to let command engine send packets
		 */
		data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
			DSI_VID_CFG0_BLLP_POWER_STOP;
		data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
		data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
		data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
		if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base))
			data |= DSI_VID_CFG0_DATABUS_WIDEN;
		dsi_write(msm_host, REG_DSI_VID_CFG0, data);

		/* Do not swap RGB colors (write the value just computed) */
		data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
		dsi_write(msm_host, REG_DSI_VID_CFG1, data);
	} else {
		/* Do not swap RGB colors */
		data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
		data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
		dsi_write(msm_host, REG_DSI_CMD_CFG0, data);

		data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
			DSI_CMD_CFG1_WR_MEM_CONTINUE(
					MIPI_DCS_WRITE_MEMORY_CONTINUE);
		/* Always insert DCS command */
		data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
		dsi_write(msm_host, REG_DSI_CMD_CFG1, data);

		if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
			data = dsi_read(msm_host, REG_DSI_CMD_MODE_MDP_CTRL2);

			if (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_3)
				data |= DSI_CMD_MODE_MDP_CTRL2_BURST_MODE;

			if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base))
				data |= DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN;

			dsi_write(msm_host, REG_DSI_CMD_MODE_MDP_CTRL2, data);
		}
	}

	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
			DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
			DSI_CMD_DMA_CTRL_LOW_POWER);

	data = 0;
	/* Always assume dedicated TE pin */
	data |= DSI_TRIG_CTRL_TE;
	data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
	data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
	data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
	    (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);

	data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
		DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
	dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);

	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
	    (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
	    phy_shared_timings->clk_pre_inc_by_2)
		dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
			  DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);

	data = 0;
	if (!(flags & MIPI_DSI_MODE_NO_EOT_PACKET))
		data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
	dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);

	/* allow only ack-err-status to generate interrupt */
	dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);

	data = DSI_CTRL_CLK_EN;

	DBG("lane number=%d", msm_host->lanes);
	data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);

	dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
		  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));

	if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
		lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);

		if (msm_dsi_phy_set_continuous_clock(phy, true))
			lane_ctrl &= ~DSI_LANE_CTRL_HS_REQ_SEL_PHY;

		dsi_write(msm_host, REG_DSI_LANE_CTRL,
			lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
	}

	data |= DSI_CTRL_ENABLE;

	dsi_write(msm_host, REG_DSI_CTRL, data);

	if (msm_host->cphy_mode)
		dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
}
static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
{
	struct drm_dsc_config *dsc = msm_host->dsc;
	u32 reg, reg_ctrl, reg_ctrl2;
	u32 slice_per_intf, total_bytes_per_intf;
	u32 pkt_per_line;
	u32 eol_byte_num;
	u32 bytes_per_pkt;

	/*
	 * first calculate dsc parameters and then program
	 * compress mode registers
	 */
	slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay);

	total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
	bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */

	eol_byte_num = total_bytes_per_intf % 3;

	/*
	 * Typically, pkt_per_line = slice_per_intf * slice_per_pkt.
	 *
	 * Since the current driver only supports slice_per_pkt = 1,
	 * pkt_per_line will be equal to slice per intf for now.
	 */
	pkt_per_line = slice_per_intf;

	if (is_cmd_mode) /* packet data type */
		reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
	else
		reg = DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(MIPI_DSI_COMPRESSED_PIXEL_STREAM);

	/*
	 * DSI_VIDEO_COMPRESSION_MODE & DSI_COMMAND_COMPRESSION_MODE
	 * registers have similar offsets, so for below common code use
	 * DSI_VIDEO_COMPRESSION_MODE_XXXX for setting bits
	 *
	 * pkt_per_line is log2 encoded, >>1 works for supported values (1,2,4)
	 */
	if (pkt_per_line > 4)
		drm_warn_once(msm_host->dev, "pkt_per_line too big");
	reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(pkt_per_line >> 1);
	reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(eol_byte_num);
	reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EN;

	if (is_cmd_mode) {
		reg_ctrl = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL);
		reg_ctrl2 = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2);

		reg_ctrl &= ~0xffff;
		reg_ctrl |= reg;

		reg_ctrl2 &= ~DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK;
		reg_ctrl2 |= DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(dsc->slice_chunk_size);

		dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
		dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
	} else {
		reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(bytes_per_pkt);
		dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg);
	}
}
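/*
 * Note on the PKT_PER_LINE encoding above: the register field is log2 of
 * the packet count, and for the values this driver can produce (1, 2, 4)
 * the ">> 1" shortcut yields exactly that: 1 -> 0, 2 -> 1, 4 -> 2.
 */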
static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
	struct drm_display_mode *mode = msm_host->mode;
	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
	u32 h_total = mode->htotal;
	u32 v_total = mode->vtotal;
	u32 hs_end = mode->hsync_end - mode->hsync_start;
	u32 vs_end = mode->vsync_end - mode->vsync_start;
	u32 ha_start = h_total - mode->hsync_start;
	u32 ha_end = ha_start + mode->hdisplay;
	u32 va_start = v_total - mode->vsync_start;
	u32 va_end = va_start + mode->vdisplay;
	u32 hdisplay = mode->hdisplay;
	u32 wc;
	int ret;
	bool wide_bus_enabled = msm_dsi_host_is_wide_bus_enabled(&msm_host->base);

	DBG("");

	/*
	 * For bonded DSI mode, the current DRM mode has
	 * the complete width of the panel. Since the complete
	 * panel is driven by two DSI controllers, the horizontal
	 * timings have to be split between the two dsi controllers.
	 * Adjust the DSI host timing values accordingly.
	 */
	if (is_bonded_dsi) {
		h_total /= 2;
		hs_end /= 2;
		ha_start /= 2;
		ha_end /= 2;
		hdisplay /= 2;
	}

	if (msm_host->dsc) {
		struct drm_dsc_config *dsc = msm_host->dsc;
		u32 bytes_per_pclk;

		/* update dsc params with timing params */
		if (!dsc || !mode->hdisplay || !mode->vdisplay) {
			pr_err("DSI: invalid input: pic_width: %d pic_height: %d\n",
			       mode->hdisplay, mode->vdisplay);
			return;
		}

		dsc->pic_width = mode->hdisplay;
		dsc->pic_height = mode->vdisplay;
		DBG("Mode %dx%d\n", dsc->pic_width, dsc->pic_height);

		/*
		 * we do the calculations for dsc parameters here so that
		 * panel can use these parameters
		 */
		ret = dsi_populate_dsc_params(msm_host, dsc);
		if (ret)
			return;

		/*
		 * DPU sends 3 bytes per pclk cycle to DSI. If widebus is
		 * enabled, bus width is extended to 6 bytes.
		 *
		 * Calculate the number of pclks needed to transmit one line of
		 * the compressed data.
		 *
		 * The back/front porch and pulse width are kept intact. For
		 * VIDEO mode they represent timing parameters rather than
		 * actual data transfer, see the documentation for
		 * dsi_adjust_pclk_for_compression(). For CMD mode they are
		 * unused anyway.
		 */
		h_total -= hdisplay;
		if (wide_bus_enabled && !(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
			bytes_per_pclk = 6;
		else
			bytes_per_pclk = 3;

		hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), bytes_per_pclk);

		h_total += hdisplay;
		ha_end = ha_start + hdisplay;
	}

	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
		if (msm_host->dsc)
			dsi_update_dsc_timing(msm_host, false, mode->hdisplay);

		dsi_write(msm_host, REG_DSI_ACTIVE_H,
			DSI_ACTIVE_H_START(ha_start) |
			DSI_ACTIVE_H_END(ha_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_V,
			DSI_ACTIVE_V_START(va_start) |
			DSI_ACTIVE_V_END(va_end));
		dsi_write(msm_host, REG_DSI_TOTAL,
			DSI_TOTAL_H_TOTAL(h_total - 1) |
			DSI_TOTAL_V_TOTAL(v_total - 1));

		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
			DSI_ACTIVE_HSYNC_START(hs_start) |
			DSI_ACTIVE_HSYNC_END(hs_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
	} else {		/* command mode */
		if (msm_host->dsc)
			dsi_update_dsc_timing(msm_host, true, mode->hdisplay);

		/* image data and 1 byte write_memory_start cmd */
		if (!msm_host->dsc)
			wc = hdisplay * mipi_dsi_pixel_format_to_bpp(msm_host->format) / 8 + 1;
		else
			/*
			 * When DSC is enabled, WC = slice_chunk_size * slice_per_pkt + 1.
			 * Currently, the driver only supports the default value of
			 * slice_per_pkt = 1.
			 *
			 * TODO: Expand mipi_dsi_device struct to hold slice_per_pkt info
			 *       and adjust DSC math to account for slice_per_pkt.
			 */
			wc = msm_host->dsc->slice_chunk_size + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
			DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
			DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(
					msm_host->channel) |
			DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(
					MIPI_DSI_DCS_LONG_WRITE));

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_TOTAL,
			DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(hdisplay) |
			DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(mode->vdisplay));
	}
}

static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	u32 ctrl;

	ctrl = dsi_read(msm_host, REG_DSI_CTRL);

	if (ctrl & DSI_CTRL_ENABLE) {
		dsi_write(msm_host, REG_DSI_CTRL, ctrl & ~DSI_CTRL_ENABLE);
		/*
		 * The dsi controller needs to be disabled before the
		 * clocks are turned on
		 */
		wmb();
	}

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* clocks need to be enabled before reset */

	/* dsi controller can only be reset while clocks are running */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure the reset happens */
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb(); /* controller out of reset */

	if (ctrl & DSI_CTRL_ENABLE) {
		dsi_write(msm_host, REG_DSI_CTRL, ctrl);
		wmb(); /* make sure the dsi controller is enabled again */
	}
}

static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
					bool video_mode, bool enable)
{
	u32 dsi_ctrl;

	dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);

	if (!enable) {
		dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
				DSI_CTRL_CMD_MODE_EN);
		dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
				DSI_IRQ_MASK_VIDEO_DONE, 0);
	} else {
		if (video_mode) {
			dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
		} else {		/* command mode */
			dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
			dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
		}
		dsi_ctrl |= DSI_CTRL_ENABLE;
	}

	dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
}

static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
{
	u32 data;

	data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);

	if (mode == 0)
		data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
	else
		data |= DSI_CMD_DMA_CTRL_LOW_POWER;

	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
}

static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
	u32 ret = 0;
	struct device *dev = &msm_host->pdev->dev;

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);

	reinit_completion(&msm_host->video_comp);

	ret = wait_for_completion_timeout(&msm_host->video_comp,
			msecs_to_jiffies(70));

	if (ret == 0)
		DRM_DEV_ERROR(dev, "wait for video done timed out\n");

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}

static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
{
	u32 data;

	if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
		return;

	data = dsi_read(msm_host, REG_DSI_STATUS0);

	/*
	 * If the video mode engine is not busy, it's because either the
	 * timing engine was not turned on or the DSI controller has already
	 * finished transmitting the video data, so there is no need to wait
	 * in those cases.
	 */
	if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
		return;
	if (msm_host->power_on && msm_host->enabled) {
		dsi_wait4video_done(msm_host);
		/* delay 4 ms to skip BLLP */
		usleep_range(2000, 4000);
	}
}

int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
{
	struct drm_device *dev = msm_host->dev;
	struct msm_drm_private *priv = dev->dev_private;
	uint64_t iova;
	u8 *data;

	msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);

	data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
					msm_host->aspace,
					&msm_host->tx_gem_obj, &iova);

	if (IS_ERR(data)) {
		msm_host->tx_gem_obj = NULL;
		return PTR_ERR(data);
	}

	msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");

	msm_host->tx_size = msm_host->tx_gem_obj->size;

	return 0;
}

int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
{
	struct drm_device *dev = msm_host->dev;

	msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
					&msm_host->tx_buf_paddr, GFP_KERNEL);
	if (!msm_host->tx_buf)
		return -ENOMEM;

	msm_host->tx_size = size;

	return 0;
}

void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	struct drm_device *dev = msm_host->dev;

	/*
	 * This is possible if we're tearing down before we've had a chance to
	 * fully initialize. A very real possibility if our probe is deferred,
	 * in which case we'll hit msm_dsi_host_destroy() without having run
	 * through the dsi_tx_buf_alloc().
	 */
	if (!dev)
		return;

	if (msm_host->tx_gem_obj) {
		msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
		msm_gem_address_space_put(msm_host->aspace);
		msm_host->tx_gem_obj = NULL;
		msm_host->aspace = NULL;
	}

	if (msm_host->tx_buf)
		dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
			msm_host->tx_buf_paddr);
}

void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
{
	return msm_gem_get_vaddr(msm_host->tx_gem_obj);
}

void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
{
	return msm_host->tx_buf;
}

void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
{
	msm_gem_put_vaddr(msm_host->tx_gem_obj);
}

/*
 * prepare cmd buffer to be txed
 */
static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
			   const struct mipi_dsi_msg *msg)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	struct mipi_dsi_packet packet;
	int len;
	int ret;
	u8 *data;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret) {
		pr_err("%s: create packet failed, %d\n", __func__, ret);
		return ret;
	}
	len = (packet.size + 3) & (~0x3);

	if (len > msm_host->tx_size) {
		pr_err("%s: packet size is too big\n", __func__);
		return -EINVAL;
	}

	data = cfg_hnd->ops->tx_buf_get(msm_host);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		pr_err("%s: get vaddr failed, %d\n", __func__, ret);
		return ret;
	}

	/* MSM specific command format in memory */
	data[0] = packet.header[1];
	data[1] = packet.header[2];
	data[2] = packet.header[0];
	data[3] = BIT(7); /* Last packet */
	if (mipi_dsi_packet_format_is_long(msg->type))
		data[3] |= BIT(6);
	if (msg->rx_buf && msg->rx_len)
		data[3] |= BIT(5);

	/* Long packet */
	if (packet.payload && packet.payload_length)
		memcpy(data + 4, packet.payload, packet.payload_length);

	/* Append 0xff to the end */
	if (packet.size < len)
		memset(data + packet.size, 0xff, len - packet.size);

	if (cfg_hnd->ops->tx_buf_put)
		cfg_hnd->ops->tx_buf_put(msm_host);

	return len;
}
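/*
 * Recap of the layout dsi_cmd_dma_add() produces (read directly off the
 * code above): bytes 0 and 1 carry the two MIPI header parameter bytes,
 * byte 2 carries the MIPI data type, and byte 3 carries the flags
 * (bit 7 = last packet, bit 6 = long packet, bit 5 = expects a response),
 * followed by the payload for long packets and 0xff padding up to the
 * 4-byte-aligned DMA length.
 */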
/*
 * dsi_short_read1_resp: 1 parameter
 */
static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
	u8 *data = msg->rx_buf;

	if (data && (msg->rx_len >= 1)) {
		*data = buf[1]; /* strip out dcs type */
		return 1;
	} else {
		pr_err("%s: read data does not match with rx_buf len %zu\n",
			__func__, msg->rx_len);
		return -EINVAL;
	}
}

/*
 * dsi_short_read2_resp: 2 parameters
 */
static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
	u8 *data = msg->rx_buf;

	if (data && (msg->rx_len >= 2)) {
		data[0] = buf[1]; /* strip out dcs type */
		data[1] = buf[2];
		return 2;
	} else {
		pr_err("%s: read data does not match with rx_buf len %zu\n",
			__func__, msg->rx_len);
		return -EINVAL;
	}
}

static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
	/* strip out 4 byte dcs header */
	if (msg->rx_buf && msg->rx_len)
		memcpy(msg->rx_buf, buf + 4, msg->rx_len);

	return msg->rx_len;
}

int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
{
	struct drm_device *dev = msm_host->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (!dma_base)
		return -EINVAL;

	return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
				priv->kms->aspace, dma_base);
}

int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
{
	if (!dma_base)
		return -EINVAL;

	*dma_base = msm_host->tx_buf_paddr;
	return 0;
}

static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int ret;
	uint64_t dma_base;
	bool triggered;

	ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
	if (ret) {
		pr_err("%s: failed to get iova: %d\n", __func__, ret);
		return ret;
	}

	reinit_completion(&msm_host->dma_comp);

	dsi_wait4video_eng_busy(msm_host);

	triggered = msm_dsi_manager_cmd_xfer_trigger(
						msm_host->id, dma_base, len);
	if (triggered) {
		ret = wait_for_completion_timeout(&msm_host->dma_comp,
					msecs_to_jiffies(200));
		DBG("ret=%d", ret);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else
			ret = len;
	} else {
		ret = len;
	}

	return ret;
}

static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
			u8 *buf, int rx_byte, int pkt_size)
{
	u32 *temp, data;
	int i, j = 0, cnt;
	u32 read_cnt;
	u8 reg[16];
	int repeated_bytes = 0;
	int buf_offset = buf - msm_host->rx_buf;

	temp = (u32 *)reg;
	cnt = (rx_byte + 3) >> 2;
	if (cnt > 4)
		cnt = 4; /* 4 x 32 bits registers only */

	if (rx_byte == 4)
		read_cnt = 4;
	else
		read_cnt = pkt_size + 6;

	/*
	 * In case of multiple reads from the panel, after the first read there
	 * is a possibility that some bytes of the payload repeat in the
	 * RDBK_DATA registers, since we read all the parameters from the panel
	 * right from the first byte on every pass. We need to skip the
	 * repeated bytes and then append the new parameters to the rx buffer.
	 */
	if (read_cnt > 16) {
		int bytes_shifted;
		/*
		 * Any data more than 16 bytes will be shifted out.
		 * The temp read buffer should already contain these bytes.
		 * The remaining bytes in read buffer are the repeated bytes.
		 */
		bytes_shifted = read_cnt - 16;
		repeated_bytes = buf_offset - bytes_shifted;
	}

	for (i = cnt - 1; i >= 0; i--) {
		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
		*temp++ = ntohl(data); /* to host byte order */
		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
	}

	for (i = repeated_bytes; i < 16; i++)
		buf[j++] = reg[i];

	return j;
}
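/*
 * Worked example (hypothetical second pass): with pkt_size = 14, read_cnt
 * is 14 + 6 = 20, so 20 - 16 = 4 bytes have been shifted out of the four
 * RDBK_DATA registers. If buf already holds 8 bytes (buf_offset = 8), the
 * first 8 - 4 = 4 bytes sitting in the registers are repeats and are
 * skipped before appending to the rx buffer.
 */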
static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
				const struct mipi_dsi_msg *msg)
{
	int len, ret;
	int bllp_len = msm_host->mode->hdisplay *
			mipi_dsi_pixel_format_to_bpp(msm_host->format) / 8;

	len = dsi_cmd_dma_add(msm_host, msg);
	if (len < 0) {
		pr_err("%s: failed to add cmd type = 0x%x\n",
			__func__, msg->type);
		return len;
	}

	/*
	 * For video mode, do not send cmds longer than one pixel line, since
	 * the controller can only transmit them during the BLLP.
	 *
	 * TODO: if the command is sent in LP mode, the bit rate is only
	 * half of the esc clk rate. In this case, if the video is already
	 * actively streaming, we need to check more carefully if the
	 * command can fit into one BLLP.
	 */
	if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
		pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
			__func__, len);
		return -EINVAL;
	}

	ret = dsi_cmd_dma_tx(msm_host, len);
	if (ret < 0) {
		pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n",
			__func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret);
		return ret;
	} else if (ret < len) {
		pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n",
			__func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len);
		return -EIO;
	}

	return len;
}

static void dsi_err_worker(struct work_struct *work)
{
	struct msm_dsi_host *msm_host =
		container_of(work, struct msm_dsi_host, err_work);
	u32 status = msm_host->err_work_state;

	pr_err_ratelimited("%s: status=%x\n", __func__, status);
	if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
		dsi_sw_reset(msm_host);
	/* It is safe to clear here because the error irq is disabled. */
	msm_host->err_work_state = 0;

	/* enable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
}

static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);

	if (status) {
		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
		/* Writing of an extra 0 needed to clear error bits */
		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
		msm_host->err_work_state |= DSI_ERR_STATE_ACK;
	}
}

static void dsi_timeout_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);

	if (status) {
		dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
	}
}

static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);

	if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
			DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
	}
}

static void dsi_fifo_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);

	/* fifo underflow, overflow */
	if (status) {
		dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
		if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
			msm_host->err_work_state |=
				DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
	}
}

static void dsi_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_STATUS0);

	if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
		dsi_write(msm_host, REG_DSI_STATUS0, status);
		msm_host->err_work_state |=
			DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
	}
}

static void dsi_clk_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_CLK_STATUS);

	if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
		dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
	}
}

static void dsi_error(struct msm_dsi_host *msm_host)
{
	/* disable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);

	dsi_clk_status(msm_host);
	dsi_fifo_status(msm_host);
	dsi_ack_err_status(msm_host);
	dsi_timeout_status(msm_host);
	dsi_status(msm_host);
	dsi_dln0_phy_err(msm_host);

	queue_work(msm_host->workqueue, &msm_host->err_work);
}

static irqreturn_t dsi_host_irq(int irq, void *ptr)
{
	struct msm_dsi_host *msm_host = ptr;
	u32 isr;
	unsigned long flags;

	if (!msm_host->ctrl_base)
		return IRQ_HANDLED;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);

	DBG("isr=0x%x, id=%d", isr, msm_host->id);

	if (isr & DSI_IRQ_ERROR)
		dsi_error(msm_host);
	if (isr & DSI_IRQ_VIDEO_DONE)
		complete(&msm_host->video_comp);

	if (isr & DSI_IRQ_CMD_DMA_DONE)
		complete(&msm_host->dma_comp);

	return IRQ_HANDLED;
}

static int dsi_host_attach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	if (dsi->lanes > msm_host->num_data_lanes)
		return -EINVAL;

	msm_host->channel = dsi->channel;
	msm_host->lanes = dsi->lanes;
	msm_host->format = dsi->format;
	msm_host->mode_flags = dsi->mode_flags;
	if (dsi->dsc)
		msm_host->dsc = dsi->dsc;

	ret = dsi_dev_attach(msm_host->pdev);
	if (ret)
		return ret;

	DBG("id=%d", msm_host->id);

	return 0;
}

static int dsi_host_detach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_dev_detach(msm_host->pdev);

	DBG("id=%d", msm_host->id);

	return 0;
}

static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
					const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	if (!msg || !msm_host->power_on)
		return -EINVAL;

	mutex_lock(&msm_host->cmd_mutex);
	ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
	mutex_unlock(&msm_host->cmd_mutex);

	return ret;
}

static const struct mipi_dsi_host_ops dsi_host_ops = {
	.attach = dsi_host_attach,
	.detach = dsi_host_detach,
	.transfer = dsi_host_transfer,
};

/*
 * List of supported physical to logical lane mappings.
 * For example, the 2nd entry represents the following mapping:
 *
 * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
 */
static const int supported_data_lane_swaps[][4] = {
	{ 0, 1, 2, 3 },
	{ 3, 0, 1, 2 },
	{ 2, 3, 0, 1 },
	{ 1, 2, 3, 0 },
	{ 0, 3, 2, 1 },
	{ 1, 0, 3, 2 },
	{ 2, 1, 0, 3 },
	{ 3, 2, 1, 0 },
};

static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
				    struct device_node *ep)
{
	struct device *dev = &msm_host->pdev->dev;
	struct property *prop;
	u32 lane_map[4];
	int ret, i, len, num_lanes;

	prop = of_find_property(ep, "data-lanes", &len);
	if (!prop) {
		DRM_DEV_DEBUG(dev,
			"failed to find data lane mapping, using default\n");
		/* Set the number of data lanes to 4 by default. */
		msm_host->num_data_lanes = 4;
		return 0;
	}

	num_lanes = drm_of_get_data_lanes_count(ep, 1, 4);
	if (num_lanes < 0) {
		DRM_DEV_ERROR(dev, "bad number of data lanes\n");
		return num_lanes;
	}

	msm_host->num_data_lanes = num_lanes;

	ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
					 num_lanes);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to read lane data\n");
		return ret;
	}

	/*
	 * compare DT specified physical-logical lane mappings with the ones
	 * supported by hardware
	 */
	for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
		const int *swap = supported_data_lane_swaps[i];
		int j;

		/*
		 * the data-lanes array we get from DT has a logical->physical
		 * mapping. The "data lane swap" register field represents
		 * supported configurations in a physical->logical mapping.
		 * Translate the DT mapping to what we understand and find a
		 * configuration that works.
		 */
		for (j = 0; j < num_lanes; j++) {
			/* bail out instead of indexing swap[] out of bounds */
			if (lane_map[j] < 0 || lane_map[j] > 3) {
				DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
					      lane_map[j]);
				return -EINVAL;
			}

			if (swap[lane_map[j]] != j)
				break;
		}

		if (j == num_lanes) {
			msm_host->dlane_swap = i;
			return 0;
		}
	}

	return -EINVAL;
}
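/*
 * Worked example: "data-lanes = <3 0 1 2>" in DT means logical lane 0 sits
 * on physical lane 3, logical 1 on physical 0, and so on. The matching
 * physical->logical entry in the table above is { 1, 2, 3, 0 } (index 3),
 * since swap[lane_map[j]] == j holds for every j.
 */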
static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc)
{
	int ret;

	if (dsc->bits_per_pixel & 0xf) {
		DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support fractional bits_per_pixel\n");
		return -EINVAL;
	}

	if (dsc->bits_per_component != 8) {
		DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support bits_per_component != 8 yet\n");
		return -EOPNOTSUPP;
	}

	dsc->simple_422 = 0;
	dsc->convert_rgb = 1;
	dsc->vbr_enable = 0;

	drm_dsc_set_const_params(dsc);
	drm_dsc_set_rc_buf_thresh(dsc);

	/* handle only bpp = bpc = 8, pre-SCR panels */
	ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR);
	if (ret) {
		DRM_DEV_ERROR(&msm_host->pdev->dev, "could not find DSC RC parameters\n");
		return ret;
	}

	dsc->initial_scale_value = drm_dsc_initial_scale_value(dsc);
	dsc->line_buf_depth = dsc->bits_per_component + 1;

	return drm_dsc_compute_rc_parameters(dsc);
}

static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
	struct msm_dsi *msm_dsi = platform_get_drvdata(msm_host->pdev);
	struct device *dev = &msm_host->pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *endpoint;
	const char *te_source;
	int ret = 0;

	/*
	 * Get the endpoint of the output port of the DSI host. In our case,
	 * this is mapped to the port with reg = 1. Don't return an error if
	 * the remote endpoint isn't defined. It's possible that there is
	 * nothing connected to the dsi output.
1809 */ 1810 endpoint = of_graph_get_endpoint_by_regs(np, 1, -1); 1811 if (!endpoint) { 1812 DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__); 1813 return 0; 1814 } 1815 1816 ret = dsi_host_parse_lane_data(msm_host, endpoint); 1817 if (ret) { 1818 DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n", 1819 __func__, ret); 1820 ret = -EINVAL; 1821 goto err; 1822 } 1823 1824 ret = of_property_read_string(endpoint, "qcom,te-source", &te_source); 1825 if (ret && ret != -EINVAL) { 1826 DRM_DEV_ERROR(dev, "%s: invalid TE source configuration %d\n", 1827 __func__, ret); 1828 goto err; 1829 } 1830 if (!ret) 1831 msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL); 1832 ret = 0; 1833 1834 if (of_property_read_bool(np, "syscon-sfpb")) { 1835 msm_host->sfpb = syscon_regmap_lookup_by_phandle(np, 1836 "syscon-sfpb"); 1837 if (IS_ERR(msm_host->sfpb)) { 1838 DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n", 1839 __func__); 1840 ret = PTR_ERR(msm_host->sfpb); 1841 } 1842 } 1843 1844 err: 1845 of_node_put(endpoint); 1846 1847 return ret; 1848 } 1849 1850 static int dsi_host_get_id(struct msm_dsi_host *msm_host) 1851 { 1852 struct platform_device *pdev = msm_host->pdev; 1853 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; 1854 struct resource *res; 1855 int i, j; 1856 1857 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl"); 1858 if (!res) 1859 return -EINVAL; 1860 1861 for (i = 0; i < VARIANTS_MAX; i++) 1862 for (j = 0; j < DSI_MAX; j++) 1863 if (cfg->io_start[i][j] == res->start) 1864 return j; 1865 1866 return -EINVAL; 1867 } 1868 1869 int msm_dsi_host_init(struct msm_dsi *msm_dsi) 1870 { 1871 struct msm_dsi_host *msm_host = NULL; 1872 struct platform_device *pdev = msm_dsi->pdev; 1873 const struct msm_dsi_config *cfg; 1874 int ret; 1875 1876 msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL); 1877 if (!msm_host) { 1878 return -ENOMEM; 1879 } 1880 1881 msm_host->pdev = pdev; 1882 msm_dsi->host = &msm_host->base; 1883 1884 ret = dsi_host_parse_dt(msm_host); 1885 if (ret) { 1886 pr_err("%s: failed to parse dt\n", __func__); 1887 return ret; 1888 } 1889 1890 msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size); 1891 if (IS_ERR(msm_host->ctrl_base)) { 1892 pr_err("%s: unable to map Dsi ctrl base\n", __func__); 1893 return PTR_ERR(msm_host->ctrl_base); 1894 } 1895 1896 pm_runtime_enable(&pdev->dev); 1897 1898 msm_host->cfg_hnd = dsi_get_config(msm_host); 1899 if (!msm_host->cfg_hnd) { 1900 pr_err("%s: get config failed\n", __func__); 1901 return -EINVAL; 1902 } 1903 cfg = msm_host->cfg_hnd->cfg; 1904 1905 msm_host->id = dsi_host_get_id(msm_host); 1906 if (msm_host->id < 0) { 1907 pr_err("%s: unable to identify DSI host index\n", __func__); 1908 return msm_host->id; 1909 } 1910 1911 /* fixup base address by io offset */ 1912 msm_host->ctrl_base += cfg->io_offset; 1913 1914 ret = devm_regulator_bulk_get_const(&pdev->dev, cfg->num_regulators, 1915 cfg->regulator_data, 1916 &msm_host->supplies); 1917 if (ret) 1918 return ret; 1919 1920 ret = dsi_clk_init(msm_host); 1921 if (ret) { 1922 pr_err("%s: unable to initialize dsi clks\n", __func__); 1923 return ret; 1924 } 1925 1926 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL); 1927 if (!msm_host->rx_buf) { 1928 pr_err("%s: alloc rx temp buf failed\n", __func__); 1929 return -ENOMEM; 1930 } 1931 1932 ret = devm_pm_opp_set_clkname(&pdev->dev, "byte"); 1933 if (ret) 1934 return ret; 1935 /* OPP table is optional */ 1936 ret = devm_pm_opp_of_add_table(&pdev->dev); 1937 
static int dsi_host_get_id(struct msm_dsi_host *msm_host)
{
	struct platform_device *pdev = msm_host->pdev;
	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
	struct resource *res;
	int i, j;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
	if (!res)
		return -EINVAL;

	for (i = 0; i < VARIANTS_MAX; i++)
		for (j = 0; j < DSI_MAX; j++)
			if (cfg->io_start[i][j] == res->start)
				return j;

	return -EINVAL;
}

int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
	struct msm_dsi_host *msm_host = NULL;
	struct platform_device *pdev = msm_dsi->pdev;
	const struct msm_dsi_config *cfg;
	int ret;

	msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
	if (!msm_host)
		return -ENOMEM;

	msm_host->pdev = pdev;
	msm_dsi->host = &msm_host->base;

	ret = dsi_host_parse_dt(msm_host);
	if (ret) {
		pr_err("%s: failed to parse dt\n", __func__);
		return ret;
	}

	msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size);
	if (IS_ERR(msm_host->ctrl_base)) {
		pr_err("%s: unable to map DSI ctrl base\n", __func__);
		return PTR_ERR(msm_host->ctrl_base);
	}

	pm_runtime_enable(&pdev->dev);

	msm_host->cfg_hnd = dsi_get_config(msm_host);
	if (!msm_host->cfg_hnd) {
		pr_err("%s: get config failed\n", __func__);
		return -EINVAL;
	}
	cfg = msm_host->cfg_hnd->cfg;

	msm_host->id = dsi_host_get_id(msm_host);
	if (msm_host->id < 0) {
		pr_err("%s: unable to identify DSI host index\n", __func__);
		return msm_host->id;
	}

	/* fixup base address by io offset */
	msm_host->ctrl_base += cfg->io_offset;

	ret = devm_regulator_bulk_get_const(&pdev->dev, cfg->num_regulators,
					    cfg->regulator_data,
					    &msm_host->supplies);
	if (ret)
		return ret;

	ret = dsi_clk_init(msm_host);
	if (ret) {
		pr_err("%s: unable to initialize dsi clks\n", __func__);
		return ret;
	}

	msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
	if (!msm_host->rx_buf) {
		pr_err("%s: alloc rx temp buf failed\n", __func__);
		return -ENOMEM;
	}

	ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "invalid OPP table in device tree\n");
		return ret;
	}

	msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!msm_host->irq) {
		dev_err(&pdev->dev, "failed to get irq\n");
		return -EINVAL;
	}

	/* do not autoenable, will be enabled later */
	ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
			       IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
			       "dsi_isr", msm_host);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
			msm_host->irq, ret);
		return ret;
	}

	init_completion(&msm_host->dma_comp);
	init_completion(&msm_host->video_comp);
	mutex_init(&msm_host->dev_mutex);
	mutex_init(&msm_host->cmd_mutex);
	spin_lock_init(&msm_host->intr_lock);

	/* setup workqueue */
	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
	if (!msm_host->workqueue)
		return -ENOMEM;

	INIT_WORK(&msm_host->err_work, dsi_err_worker);

	msm_dsi->id = msm_host->id;

	DBG("DSI host %d initialized", msm_host->id);
	return 0;
}

void msm_dsi_host_destroy(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	DBG("");
	if (msm_host->workqueue) {
		destroy_workqueue(msm_host->workqueue);
		msm_host->workqueue = NULL;
	}

	mutex_destroy(&msm_host->cmd_mutex);
	mutex_destroy(&msm_host->dev_mutex);

	pm_runtime_disable(&msm_host->pdev->dev);
}

int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
			      struct drm_device *dev)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int ret;

	msm_host->dev = dev;

	ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
	if (ret) {
		pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
		return ret;
	}

	return 0;
}

int msm_dsi_host_register(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	/* Register mipi dsi host */
	if (!msm_host->registered) {
		host->dev = &msm_host->pdev->dev;
		host->ops = &dsi_host_ops;
		ret = mipi_dsi_host_register(host);
		if (ret)
			return ret;

		msm_host->registered = true;
	}

	return 0;
}

void msm_dsi_host_unregister(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	if (msm_host->registered) {
		mipi_dsi_host_unregister(host);
		host->dev = NULL;
		host->ops = NULL;
		msm_host->registered = false;
	}
}
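/*
 * Command-DMA transfers are bracketed by msm_dsi_host_xfer_prepare() and
 * msm_dsi_host_xfer_restore(): prepare saves DSI_CTRL and forces command
 * mode plus controller enable so the DMA engine can run; restore writes
 * the saved value back. A sketch of a caller (hypothetical, not the
 * actual call site), with host a struct mipi_dsi_host pointer and msg a
 * prepared mipi_dsi_msg:
 *
 *	msm_dsi_host_xfer_prepare(host, &msg);
 *	ret = msm_dsi_host_cmd_tx(host, &msg);
 *	msm_dsi_host_xfer_restore(host, &msg);
 */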
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
			      const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;

	/* TODO: make sure dsi_cmd_mdp is idle.
	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
	 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
	 * How to handle the old versions? Wait for mdp cmd done?
	 */

	/*
	 * The mdss interrupt is generated in the mdp core clock domain,
	 * so the mdp clock must be enabled to receive the dsi interrupt.
	 */
	pm_runtime_get_sync(&msm_host->pdev->dev);
	cfg_hnd->ops->link_clk_set_rate(msm_host);
	cfg_hnd->ops->link_clk_enable(msm_host);

	/* TODO: vote for bus bandwidth */

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(0, msm_host);

	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
	dsi_write(msm_host, REG_DSI_CTRL,
		  msm_host->dma_cmd_ctrl_restore |
		  DSI_CTRL_CMD_MODE_EN |
		  DSI_CTRL_ENABLE);
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);

	return 0;
}

void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
			       const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(1, msm_host);

	/* TODO: unvote for bus bandwidth */

	cfg_hnd->ops->link_clk_disable(msm_host);
	pm_runtime_put(&msm_host->pdev->dev);
}

int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
			const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	return dsi_cmds2buf_tx(msm_host, msg);
}
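/*
 * Reads longer than the 16-byte RX FIFO arrive in chunks: the first chunk
 * fits 10 payload bytes next to the 4-byte header and 2-byte CRC, while
 * later chunks carry 14 payload bytes because the header has been shifted
 * out of the FIFO after the first pass. Illustrative chunking for
 * rlen = 24, derived from the loop below (a sketch, not a register-level
 * trace):
 *
 *	pass 1: pkt_size = 10, payload bytes  0..9,  rlen left = 14
 *	pass 2: pkt_size = 24, payload bytes 10..23, rlen left = 0
 */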
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
			const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int data_byte, rx_byte, dlen, end;
	int short_response, diff, pkt_size, ret = 0;
	char cmd;
	int rlen = msg->rx_len;
	u8 *buf;

	if (rlen <= 2) {
		short_response = 1;
		pkt_size = rlen;
		rx_byte = 4;
	} else {
		short_response = 0;
		data_byte = 10;	/* first read */
		if (rlen < data_byte)
			pkt_size = rlen;
		else
			pkt_size = data_byte;
		rx_byte = data_byte + 6; /* 4 header + 2 crc */
	}

	buf = msm_host->rx_buf;
	end = 0;
	while (!end) {
		u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
		struct mipi_dsi_msg max_pkt_size_msg = {
			.channel = msg->channel,
			.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
			.tx_len = 2,
			.tx_buf = tx,
		};

		DBG("rlen=%d pkt_size=%d rx_byte=%d",
			rlen, pkt_size, rx_byte);

		ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
		if (ret < 2) {
			pr_err("%s: Set max pkt size failed, %d\n",
				__func__, ret);
			return -EINVAL;
		}

		if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
			(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
			/* Clear the RDBK_DATA registers */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
					DSI_RDBK_DATA_CTRL_CLR);
			wmb(); /* make sure the RDBK registers are cleared */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
			wmb(); /* release cleared status before transfer */
		}

		ret = dsi_cmds2buf_tx(msm_host, msg);
		if (ret < 0) {
			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
			return ret;
		} else if (ret < msg->tx_len) {
			pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret);
			return -ECOMM;
		}

		/*
		 * Once the cmd_dma_done interrupt has been received, the
		 * response data from the client is ready in the RDBK_DATA
		 * registers. Since the rx fifo is 16 bytes, the dcs header
		 * is kept only on the first pass; on later passes it is
		 * lost as the data shifts through the registers.
		 */
		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);

		if (dlen <= 0)
			return 0;

		if (short_response)
			break;

		if (rlen <= data_byte) {
			diff = data_byte - rlen;
			end = 1;
		} else {
			diff = 0;
			rlen -= data_byte;
		}

		if (!end) {
			dlen -= 2; /* 2 crc */
			dlen -= diff;
			buf += dlen;	/* next start position */
			data_byte = 14;	/* NOT first read */
			if (rlen < data_byte)
				pkt_size += rlen;
			else
				pkt_size += data_byte;
			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
		}
	}

	/*
	 * For a single long read with requested rlen < 10, shift the start
	 * position of the rx data buffer to skip the bytes which were not
	 * updated.
	 */
	if (pkt_size < 10 && !short_response)
		buf = msm_host->rx_buf + (10 - rlen);
	else
		buf = msm_host->rx_buf;

	cmd = buf[0];
	switch (cmd) {
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
		ret = 0;
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		ret = dsi_short_read1_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		ret = dsi_short_read2_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		ret = dsi_long_read_resp(buf, msg);
		break;
	default:
		pr_warn("%s: Invalid response cmd\n", __func__);
		ret = 0;
	}

	return ret;
}

void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
				  u32 len)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);

	/* Make sure trigger happens */
	wmb();
}

void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
			       struct msm_dsi_phy *src_phy)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	msm_host->cphy_mode = src_phy->cphy_mode;
}

void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	DBG("");
	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
	/* Make sure fully reset */
	wmb();
	udelay(1000);
	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
	udelay(100);
}
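/*
 * Derive the PHY clock request from the computed link clock rates. The
 * byte clock counts 8-bit units on D-PHY but 16-bit units on C-PHY,
 * hence the x8 / x7 factors below; e.g. (numbers purely illustrative) a
 * 100 MHz byte clock yields an 800 MHz bit clock request on D-PHY and
 * 700 MHz on C-PHY.
 */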
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
				  struct msm_dsi_phy_clk_request *clk_req,
				  bool is_bonded_dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int ret;

	ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_bonded_dsi);
	if (ret) {
		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
		return;
	}

	/* CPHY transmits 16 bits over 7 clock cycles
	 * "byte_clk" is in units of 16-bits (see dsi_calc_pclk),
	 * so multiply by 7 to get the "bitclk rate"
	 */
	if (msm_host->cphy_mode)
		clk_req->bitclk_rate = msm_host->byte_clk_rate * 7;
	else
		clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;

	clk_req->escclk_rate = msm_host->esc_clk_rate;
}

void msm_dsi_host_enable_irq(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	enable_irq(msm_host->irq);
}

void msm_dsi_host_disable_irq(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	disable_irq(msm_host->irq);
}

int msm_dsi_host_enable(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_op_mode_config(msm_host,
		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);

	/* TODO: clock should be turned off for command mode,
	 * and only turned on before MDP START.
	 * This part of code should be enabled once mdp driver support it.
	 */
	/* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
	 *	dsi_link_clk_disable(msm_host);
	 *	pm_runtime_put(&msm_host->pdev->dev);
	 * }
	 */
	msm_host->enabled = true;
	return 0;
}

int msm_dsi_host_disable(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	msm_host->enabled = false;
	dsi_op_mode_config(msm_host,
		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);

	/*
	 * Even with INTF disabled, the video engine does not stop on its
	 * own, which leaves the cmd engine blocked. Reset the controller
	 * to disable the video engine so that commands can be sent.
	 */
	dsi_sw_reset(msm_host);

	return 0;
}

static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
{
	enum sfpb_ahb_arb_master_port_en en;

	if (!msm_host->sfpb)
		return;

	en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;

	regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
			SFPB_GPREG_MASTER_PORT_EN__MASK,
			SFPB_GPREG_MASTER_PORT_EN(en));
}
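/*
 * Power-on brings the host up in a fixed order: SFPB arbitration, then
 * regulators, then runtime PM and link clocks, then pinctrl, and finally
 * timing setup, sw reset and controller enable. The error unwind below
 * releases these in reverse. Sketch of the expected call (argument names
 * as used in this file, caller context assumed):
 *
 *	ret = msm_dsi_host_power_on(host, &phy_shared_timings,
 *				    is_bonded_dsi, phy);
 *	if (ret)
 *		return ret;
 */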
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
			  struct msm_dsi_phy_shared_timings *phy_shared_timings,
			  bool is_bonded_dsi, struct msm_dsi_phy *phy)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int ret = 0;

	mutex_lock(&msm_host->dev_mutex);
	if (msm_host->power_on) {
		DBG("dsi host already on");
		goto unlock_ret;
	}

	msm_host->byte_intf_clk_rate = msm_host->byte_clk_rate;
	if (phy_shared_timings->byte_intf_clk_div_2)
		msm_host->byte_intf_clk_rate /= 2;

	msm_dsi_sfpb_config(msm_host, true);

	ret = regulator_bulk_enable(msm_host->cfg_hnd->cfg->num_regulators,
				    msm_host->supplies);
	if (ret) {
		pr_err("%s: failed to enable regulators, ret=%d\n",
			__func__, ret);
		goto unlock_ret;
	}

	pm_runtime_get_sync(&msm_host->pdev->dev);
	ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
	if (!ret)
		ret = cfg_hnd->ops->link_clk_enable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable link clocks, ret=%d\n",
			__func__, ret);
		goto fail_disable_reg;
	}

	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
	if (ret) {
		pr_err("%s: failed to set pinctrl default state, %d\n",
			__func__, ret);
		goto fail_disable_clk;
	}

	dsi_timing_setup(msm_host, is_bonded_dsi);
	dsi_sw_reset(msm_host);
	dsi_ctrl_enable(msm_host, phy_shared_timings, phy);

	msm_host->power_on = true;
	mutex_unlock(&msm_host->dev_mutex);

	return 0;

fail_disable_clk:
	cfg_hnd->ops->link_clk_disable(msm_host);
	pm_runtime_put(&msm_host->pdev->dev);
fail_disable_reg:
	regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
			       msm_host->supplies);
unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return ret;
}

int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;

	mutex_lock(&msm_host->dev_mutex);
	if (!msm_host->power_on) {
		DBG("dsi host already off");
		goto unlock_ret;
	}

	dsi_ctrl_disable(msm_host);

	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);

	cfg_hnd->ops->link_clk_disable(msm_host);
	pm_runtime_put(&msm_host->pdev->dev);

	regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
			       msm_host->supplies);

	msm_dsi_sfpb_config(msm_host, false);

	DBG("-");

	msm_host->power_on = false;

unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return 0;
}

int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
				  const struct drm_display_mode *mode)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	if (msm_host->mode) {
		drm_mode_destroy(msm_host->dev, msm_host->mode);
		msm_host->mode = NULL;
	}

	msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
	if (!msm_host->mode) {
		pr_err("%s: cannot duplicate mode\n", __func__);
		return -ENOMEM;
	}

	return 0;
}

enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
					    const struct drm_display_mode *mode)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	struct drm_dsc_config *dsc = msm_host->dsc;
	int pic_width = mode->hdisplay;
	int pic_height = mode->vdisplay;

	if (!msm_host->dsc)
		return MODE_OK;

	if (pic_width % dsc->slice_width) {
		pr_err("DSI: pic_width %d has to be multiple of slice %d\n",
			pic_width, dsc->slice_width);
		return MODE_H_ILLEGAL;
	}

	if (pic_height % dsc->slice_height) {
		pr_err("DSI: pic_height %d has to be multiple of slice %d\n",
			pic_height, dsc->slice_height);
		return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}

unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
{
	return to_msm_dsi_host(host)->mode_flags;
}

void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	pm_runtime_get_sync(&msm_host->pdev->dev);

	msm_disp_snapshot_add_block(disp_state, msm_host->ctrl_size,
				    msm_host->ctrl_base, "dsi%d_ctrl", msm_host->id);

	pm_runtime_put_sync(&msm_host->pdev->dev);
}
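/*
 * Built-in test pattern generator (TPG) helpers: the video-mode variant
 * feeds a checkered rectangle into the video path continuously, while
 * the command-mode variant drives the CMD_MDP stream-0 path and needs an
 * explicit software trigger per frame, as issued in
 * msm_dsi_host_test_pattern_en() below.
 */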
static void msm_dsi_host_video_test_pattern_setup(struct msm_dsi_host *msm_host)
{
	u32 reg;

	reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);

	dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, 0xff);
	/* draw checkered rectangle pattern */
	dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL,
		  DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN);
	/* use 24-bit RGB test pattern */
	dsi_write(msm_host, REG_DSI_TPG_VIDEO_CONFIG,
		  DSI_TPG_VIDEO_CONFIG_BPP(VIDEO_CONFIG_24BPP) |
		  DSI_TPG_VIDEO_CONFIG_RGB);

	reg |= DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(VID_MDSS_GENERAL_PATTERN);
	dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg);

	DBG("Video test pattern setup done\n");
}

static void msm_dsi_host_cmd_test_pattern_setup(struct msm_dsi_host *msm_host)
{
	u32 reg;

	reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);

	/* initial value for test pattern */
	dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0, 0xff);

	reg |= DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(CMD_MDP_MDSS_GENERAL_PATTERN);

	dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg);
	/* draw checkered rectangle pattern */
	dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL2,
		  DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN);

	DBG("Cmd test pattern setup done\n");
}

void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	bool is_video_mode = !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO);
	u32 reg;

	if (is_video_mode)
		msm_dsi_host_video_test_pattern_setup(msm_host);
	else
		msm_dsi_host_cmd_test_pattern_setup(msm_host);

	reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
	/* enable the test pattern generator */
	dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, (reg | DSI_TEST_PATTERN_GEN_CTRL_EN));

	/* for command mode need to trigger one frame from tpg */
	if (!is_video_mode)
		dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER,
			  DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER);
}

struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	return msm_host->dsc;
}