// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <video/mipi_display.h>
#include <video/videomode.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "mtk_disp_drv.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"

#define DSI_START		0x00

#define DSI_INTEN		0x08

#define DSI_INTSTA		0x0c
#define LPRX_RD_RDY_INT_FLAG		BIT(0)
#define CMD_DONE_INT_FLAG		BIT(1)
#define TE_RDY_INT_FLAG			BIT(2)
#define VM_DONE_INT_FLAG		BIT(3)
#define EXT_TE_RDY_INT_FLAG		BIT(4)
#define DSI_BUSY			BIT(31)

#define DSI_CON_CTRL		0x10
#define DSI_RESET			BIT(0)
#define DSI_EN				BIT(1)
#define DPHY_RESET			BIT(2)

#define DSI_MODE_CTRL		0x14
#define MODE				(3)
#define CMD_MODE			0
#define SYNC_PULSE_MODE			1
#define SYNC_EVENT_MODE			2
#define BURST_MODE			3
#define FRM_MODE			BIT(16)
#define MIX_MODE			BIT(17)

#define DSI_TXRX_CTRL		0x18
#define VC_NUM				BIT(1)
#define LANE_NUM			(0xf << 2)
#define DIS_EOT				BIT(6)
#define NULL_EN				BIT(7)
#define TE_FREERUN			BIT(8)
#define EXT_TE_EN			BIT(9)
#define EXT_TE_EDGE			BIT(10)
#define MAX_RTN_SIZE			(0xf << 12)
#define HSTX_CKLP_EN			BIT(16)

#define DSI_PSCTRL		0x1c
#define DSI_PS_WC			0x3fff
#define DSI_PS_SEL			(3 << 16)
#define PACKED_PS_16BIT_RGB565		(0 << 16)
#define LOOSELY_PS_18BIT_RGB666		(1 << 16)
#define PACKED_PS_18BIT_RGB666		(2 << 16)
#define PACKED_PS_24BIT_RGB888		(3 << 16)

#define DSI_VSA_NL		0x20
#define DSI_VBP_NL		0x24
#define DSI_VFP_NL		0x28
#define DSI_VACT_NL		0x2C
#define DSI_SIZE_CON		0x38
#define DSI_HSA_WC		0x50
#define DSI_HBP_WC		0x54
#define DSI_HFP_WC		0x58

#define DSI_CMDQ_SIZE		0x60
#define CMDQ_SIZE			0x3f
#define CMDQ_SIZE_SEL			BIT(15)

#define DSI_HSTX_CKL_WC		0x64

#define DSI_RX_DATA0		0x74
#define DSI_RX_DATA1		0x78
#define DSI_RX_DATA2		0x7c
#define DSI_RX_DATA3		0x80

#define DSI_RACK		0x84
#define RACK				BIT(0)

#define DSI_PHY_LCCON		0x104
#define LC_HS_TX_EN			BIT(0)
#define LC_ULPM_EN			BIT(1)
#define LC_WAKEUP_EN			BIT(2)

#define DSI_PHY_LD0CON		0x108
#define LD0_HS_TX_EN			BIT(0)
#define LD0_ULPM_EN			BIT(1)
#define LD0_WAKEUP_EN			BIT(2)

#define DSI_PHY_TIMECON0	0x110
#define LPX				(0xff << 0)
#define HS_PREP				(0xff << 8)
#define HS_ZERO				(0xff << 16)
#define HS_TRAIL			(0xff << 24)

#define DSI_PHY_TIMECON1	0x114
#define TA_GO				(0xff << 0)
#define TA_SURE				(0xff << 8)
#define TA_GET				(0xff << 16)
#define DA_HS_EXIT			(0xff << 24)

#define DSI_PHY_TIMECON2	0x118
#define CONT_DET			(0xff << 0)
#define CLK_ZERO			(0xff << 16)
#define CLK_TRAIL			(0xff << 24)

#define DSI_PHY_TIMECON3	0x11c
#define CLK_HS_PREP			(0xff << 0)
#define CLK_HS_POST			(0xff << 8)
#define CLK_HS_EXIT			(0xff << 16)

#define DSI_VM_CMD_CON		0x130
#define VM_CMD_EN			BIT(0)
#define TS_VFP_EN			BIT(5)

#define DSI_SHADOW_DEBUG	0x190U
#define FORCE_COMMIT			BIT(0)
#define BYPASS_SHADOW			BIT(1)

#define CONFIG				(0xff << 0)
#define SHORT_PACKET			0
#define LONG_PACKET			2
#define BTA				BIT(2)
#define DATA_ID				(0xff << 8)
#define DATA_0				(0xff << 16)
#define DATA_1				(0xff << 24)

#define NS_TO_CYCLE(n, c)	((n) / (c) + (((n) % (c)) ? 1 : 0))

#define MTK_DSI_HOST_IS_READ(type) \
	((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
	(type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
	(type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
	(type == MIPI_DSI_DCS_READ))

struct mtk_phy_timing {
	u32 lpx;
	u32 da_hs_prepare;
	u32 da_hs_zero;
	u32 da_hs_trail;

	u32 ta_go;
	u32 ta_sure;
	u32 ta_get;
	u32 da_hs_exit;

	u32 clk_hs_zero;
	u32 clk_hs_trail;

	u32 clk_hs_prepare;
	u32 clk_hs_post;
	u32 clk_hs_exit;
};

struct phy;

struct mtk_dsi_driver_data {
	const u32 reg_cmdq_off;
	bool has_shadow_ctl;
	bool has_size_ctl;
	bool cmdq_long_packet_ctl;
};

struct mtk_dsi {
	struct device *dev;
	struct mipi_dsi_host host;
	struct drm_encoder encoder;
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
	struct drm_connector *connector;
	struct phy *phy;

	void __iomem *regs;

	struct clk *engine_clk;
	struct clk *digital_clk;
	struct clk *hs_clk;

	u32 data_rate;

	unsigned long mode_flags;
	enum mipi_dsi_pixel_format format;
	unsigned int lanes;
	struct videomode vm;
	struct mtk_phy_timing phy_timing;
	int refcount;
	bool enabled;
	bool lanes_ready;
	u32 irq_data;
	wait_queue_head_t irq_wait_queue;
	const struct mtk_dsi_driver_data *driver_data;
};

static inline struct mtk_dsi *bridge_to_dsi(struct drm_bridge *b)
{
	return container_of(b, struct mtk_dsi, bridge);
}

static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
{
	return container_of(h, struct mtk_dsi, host);
}

static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
{
	u32 temp = readl(dsi->regs + offset);

	writel((temp & ~mask) | (data & mask), dsi->regs + offset);
}
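
/*
 * The D-PHY timing parameters below are derived from the per-lane data rate
 * (converted to MHz) and packed into the 8-bit fields of DSI_PHY_TIMECON0..3
 * defined above. The numeric constants appear to approximate the MIPI D-PHY
 * minimum timings (T_LPX, THS-PREPARE, THS-ZERO, ...) expressed in byte-clock
 * cycles (data rate / 8), with some margin added.
 */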
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
	u32 timcon0, timcon1, timcon2, timcon3;
	u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
	struct mtk_phy_timing *timing = &dsi->phy_timing;

	timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
	timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
	timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
			     timing->da_hs_prepare;
	timing->da_hs_trail = timing->da_hs_prepare + 1;

	timing->ta_go = 4 * timing->lpx - 2;
	timing->ta_sure = timing->lpx + 2;
	timing->ta_get = 4 * timing->lpx;
	timing->da_hs_exit = 2 * timing->lpx + 1;

	timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
	timing->clk_hs_post = timing->clk_hs_prepare + 8;
	timing->clk_hs_trail = timing->clk_hs_prepare;
	timing->clk_hs_zero = timing->clk_hs_trail * 4;
	timing->clk_hs_exit = 2 * timing->clk_hs_trail;

	timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
		  timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
	timcon1 = timing->ta_go | timing->ta_sure << 8 |
		  timing->ta_get << 16 | timing->da_hs_exit << 24;
	timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
		  timing->clk_hs_trail << 24;
	timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
		  timing->clk_hs_exit << 16;

	writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
	writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
	writel(timcon2, dsi->regs + DSI_PHY_TIMECON2);
	writel(timcon3, dsi->regs + DSI_PHY_TIMECON3);
}

static void mtk_dsi_enable(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN);
}

static void mtk_dsi_disable(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}

static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}

static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET);
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0);
}

static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}

static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}

static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
	return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN;
}

static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
	if (enter && !mtk_dsi_clk_hs_state(dsi))
		mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
	else if (!enter && mtk_dsi_clk_hs_state(dsi))
		mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
}

static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
{
	u32 vid_mode = CMD_MODE;

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
			vid_mode = BURST_MODE;
		else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
			vid_mode = SYNC_PULSE_MODE;
		else
			vid_mode = SYNC_EVENT_MODE;
	}

	writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
}
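
/*
 * DSI_PSCTRL carries the pixel stream word count (the number of payload
 * bytes per line of active video, i.e. hactive * bytes-per-pixel) together
 * with the packing format selected through the DSI_PS_SEL field. The same
 * byte count is also written to DSI_HSTX_CKL_WC below.
 */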
static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
{
	struct videomode *vm = &dsi->vm;
	u32 dsi_buf_bpp, ps_wc;
	u32 ps_bpp_mode;

	if (dsi->format == MIPI_DSI_FMT_RGB565)
		dsi_buf_bpp = 2;
	else
		dsi_buf_bpp = 3;

	ps_wc = vm->hactive * dsi_buf_bpp;
	ps_bpp_mode = ps_wc;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB888:
		ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
		break;
	case MIPI_DSI_FMT_RGB666:
		ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
		break;
	case MIPI_DSI_FMT_RGB565:
		ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
		break;
	}

	writel(vm->vactive, dsi->regs + DSI_VACT_NL);
	writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
	writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}

static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
	u32 tmp_reg;

	switch (dsi->lanes) {
	case 1:
		tmp_reg = 1 << 2;
		break;
	case 2:
		tmp_reg = 3 << 2;
		break;
	case 3:
		tmp_reg = 7 << 2;
		break;
	case 4:
		tmp_reg = 0xf << 2;
		break;
	default:
		tmp_reg = 0xf << 2;
		break;
	}

	if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
		tmp_reg |= HSTX_CKLP_EN;

	if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
		tmp_reg |= DIS_EOT;

	writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}

static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
	u32 dsi_tmp_buf_bpp;
	u32 tmp_reg;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB888:
		tmp_reg = PACKED_PS_24BIT_RGB888;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB666:
		tmp_reg = LOOSELY_PS_18BIT_RGB666;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		tmp_reg = PACKED_PS_18BIT_RGB666;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB565:
		tmp_reg = PACKED_PS_16BIT_RGB565;
		dsi_tmp_buf_bpp = 2;
		break;
	default:
		tmp_reg = PACKED_PS_24BIT_RGB888;
		dsi_tmp_buf_bpp = 3;
		break;
	}

	tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
	writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}
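
/*
 * The horizontal timings are programmed as byte counts on the DSI link
 * (porch length in pixels * bytes-per-pixel, minus packet overhead). HFP and
 * HBP are then shrunk proportionally to compensate for the cycles the data
 * lanes spend in LP<->HS transitions, so that the overall line time is kept;
 * if the porches are too short to absorb that, the frame rate drops and a
 * warning is printed.
 */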
static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
	u32 horizontal_sync_active_byte;
	u32 horizontal_backporch_byte;
	u32 horizontal_frontporch_byte;
	u32 horizontal_front_back_byte;
	u32 data_phy_cycles_byte;
	u32 dsi_tmp_buf_bpp, data_phy_cycles;
	u32 delta;
	struct mtk_phy_timing *timing = &dsi->phy_timing;

	struct videomode *vm = &dsi->vm;

	if (dsi->format == MIPI_DSI_FMT_RGB565)
		dsi_tmp_buf_bpp = 2;
	else
		dsi_tmp_buf_bpp = 3;

	writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
	writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
	writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
	writel(vm->vactive, dsi->regs + DSI_VACT_NL);

	if (dsi->driver_data->has_size_ctl)
		writel(vm->vactive << 16 | vm->hactive,
		       dsi->regs + DSI_SIZE_CON);

	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
	else
		horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
					    dsi_tmp_buf_bpp - 10;

	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
			  timing->da_hs_zero + timing->da_hs_exit + 3;

	delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
	delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 0 : 2;

	horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
	horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
	data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;

	if (horizontal_front_back_byte > data_phy_cycles_byte) {
		horizontal_frontporch_byte -= data_phy_cycles_byte *
					      horizontal_frontporch_byte /
					      horizontal_front_back_byte;

		horizontal_backporch_byte -= data_phy_cycles_byte *
					     horizontal_backporch_byte /
					     horizontal_front_back_byte;
	} else {
		DRM_WARN("HFP + HBP less than d-phy cycles, FPS will be under 60Hz\n");
	}

	if ((dsi->mode_flags & MIPI_DSI_HS_PKT_END_ALIGNED) &&
	    (dsi->lanes == 4)) {
		horizontal_sync_active_byte =
			roundup(horizontal_sync_active_byte, dsi->lanes) - 2;
		horizontal_frontporch_byte =
			roundup(horizontal_frontporch_byte, dsi->lanes) - 2;
		horizontal_backporch_byte =
			roundup(horizontal_backporch_byte, dsi->lanes) - 2;
		horizontal_backporch_byte -=
			(vm->hactive * dsi_tmp_buf_bpp + 2) % dsi->lanes;
	}

	writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
	writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
	writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);

	mtk_dsi_ps_control(dsi);
}

static void mtk_dsi_start(struct mtk_dsi *dsi)
{
	writel(0, dsi->regs + DSI_START);
	writel(1, dsi->regs + DSI_START);
}

static void mtk_dsi_stop(struct mtk_dsi *dsi)
{
	writel(0, dsi->regs + DSI_START);
}

static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
{
	writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
{
	u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

	writel(inten, dsi->regs + DSI_INTEN);
}

static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
{
	dsi->irq_data |= irq_bit;
}

static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
{
	dsi->irq_data &= ~irq_bit;
}

static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
				     unsigned int timeout)
{
	s32 ret = 0;
	unsigned long jiffies = msecs_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
					       dsi->irq_data & irq_flag,
					       jiffies);
	if (ret == 0) {
		DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag);

		mtk_dsi_enable(dsi);
		mtk_dsi_reset_engine(dsi);
	}

	return ret;
}

static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
{
	struct mtk_dsi *dsi = dev_id;
	u32 status, tmp;
	u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

	status = readl(dsi->regs + DSI_INTSTA) & flag;

	if (status) {
		do {
			mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
			tmp = readl(dsi->regs + DSI_INTSTA);
		} while (tmp & DSI_BUSY);

		mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
		mtk_dsi_irq_data_set(dsi, status);
		wake_up_interruptible(&dsi->irq_wait_queue);
	}

	return IRQ_HANDLED;
}

static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
{
	mtk_dsi_irq_data_clear(dsi, irq_flag);
	mtk_dsi_set_cmd_mode(dsi);

	if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
		DRM_ERROR("failed to switch cmd mode\n");
		return -ETIME;
	} else {
		return 0;
	}
}
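
/*
 * Power on/off are refcounted so the DDP path and the bridge callbacks can
 * share the DSI block. The per-lane HS bit rate handed to the "hs" clock
 * (and hence to the D-PHY) is pixel clock * bits-per-pixel / lanes.
 */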
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
	struct device *dev = dsi->host.dev;
	int ret;
	u32 bit_per_pixel;

	if (++dsi->refcount != 1)
		return 0;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB565:
		bit_per_pixel = 16;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		bit_per_pixel = 18;
		break;
	case MIPI_DSI_FMT_RGB666:
	case MIPI_DSI_FMT_RGB888:
	default:
		bit_per_pixel = 24;
		break;
	}

	dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
					  dsi->lanes);

	ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
	if (ret < 0) {
		dev_err(dev, "Failed to set data rate: %d\n", ret);
		goto err_refcount;
	}

	phy_power_on(dsi->phy);

	ret = clk_prepare_enable(dsi->engine_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable engine clock: %d\n", ret);
		goto err_phy_power_off;
	}

	ret = clk_prepare_enable(dsi->digital_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable digital clock: %d\n", ret);
		goto err_disable_engine_clk;
	}

	mtk_dsi_enable(dsi);

	if (dsi->driver_data->has_shadow_ctl)
		writel(FORCE_COMMIT | BYPASS_SHADOW,
		       dsi->regs + DSI_SHADOW_DEBUG);

	mtk_dsi_reset_engine(dsi);
	mtk_dsi_phy_timconfig(dsi);

	mtk_dsi_ps_control_vact(dsi);
	mtk_dsi_set_vm_cmd(dsi);
	mtk_dsi_config_vdo_timing(dsi);
	mtk_dsi_set_interrupt_enable(dsi);

	return 0;
err_disable_engine_clk:
	clk_disable_unprepare(dsi->engine_clk);
err_phy_power_off:
	phy_power_off(dsi->phy);
err_refcount:
	dsi->refcount--;
	return ret;
}

static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
	if (WARN_ON(dsi->refcount == 0))
		return;

	if (--dsi->refcount != 0)
		return;

	/*
	 * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
	 * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
	 * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
	 * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
	 * after dsi is fully set.
	 */
	mtk_dsi_stop(dsi);

	mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
	mtk_dsi_reset_engine(dsi);
	mtk_dsi_lane0_ulp_mode_enter(dsi);
	mtk_dsi_clk_ulp_mode_enter(dsi);
	/* set the lane number as 0 to pull down mipi */
	writel(0, dsi->regs + DSI_TXRX_CTRL);

	mtk_dsi_disable(dsi);

	clk_disable_unprepare(dsi->engine_clk);
	clk_disable_unprepare(dsi->digital_clk);

	phy_power_off(dsi->phy);

	dsi->lanes_ready = false;
}
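
/*
 * Bring the lanes up once per power-on cycle: program the lane count, reset
 * the D-PHY, wake the clock and data-0 lanes from ultra-low-power mode and
 * keep the clock lane in LP until mtk_dsi_clk_hs_mode() switches it to HS.
 * The delays give the receiver side time to react, per the comment below.
 */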
static void mtk_dsi_lane_ready(struct mtk_dsi *dsi)
{
	if (!dsi->lanes_ready) {
		dsi->lanes_ready = true;
		mtk_dsi_rxtx_control(dsi);
		usleep_range(30, 100);
		mtk_dsi_reset_dphy(dsi);
		mtk_dsi_clk_ulp_mode_leave(dsi);
		mtk_dsi_lane0_ulp_mode_leave(dsi);
		mtk_dsi_clk_hs_mode(dsi, 0);
		usleep_range(1000, 3000);
		/* The reaction time after pulling up the mipi signal for dsi_rx */
	}
}

static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
{
	if (dsi->enabled)
		return;

	mtk_dsi_lane_ready(dsi);
	mtk_dsi_set_mode(dsi);
	mtk_dsi_clk_hs_mode(dsi, 1);

	mtk_dsi_start(dsi);

	dsi->enabled = true;
}

static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
{
	if (!dsi->enabled)
		return;

	dsi->enabled = false;
}

static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	/* Attach the panel or bridge to the dsi bridge */
	return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
				 &dsi->bridge, flags);
}

static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge,
				    const struct drm_display_mode *mode,
				    const struct drm_display_mode *adjusted)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	drm_display_mode_to_videomode(adjusted, &dsi->vm);
}

static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
					  struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	mtk_output_dsi_disable(dsi);
}

static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	if (dsi->refcount == 0)
		return;

	mtk_output_dsi_enable(dsi);
}

static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
					     struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);
	int ret;

	ret = mtk_dsi_poweron(dsi);
	if (ret < 0)
		DRM_ERROR("failed to power on dsi\n");
}

static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
					       struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	mtk_dsi_poweroff(dsi);
}
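
/*
 * Reject modes whose per-lane bit rate exceeds 1.5 Gbps: mode->clock is in
 * kHz, so clock * bpp / lanes is the per-lane rate in kbps and 1500000
 * corresponds to 1.5 Gbps.
 */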
static enum drm_mode_status
mtk_dsi_bridge_mode_valid(struct drm_bridge *bridge,
			  const struct drm_display_info *info,
			  const struct drm_display_mode *mode)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);
	u32 bpp;

	if (dsi->format == MIPI_DSI_FMT_RGB565)
		bpp = 16;
	else
		bpp = 24;

	if (mode->clock * bpp / dsi->lanes > 1500000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
	.attach = mtk_dsi_bridge_attach,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_disable = mtk_dsi_bridge_atomic_disable,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_enable = mtk_dsi_bridge_atomic_enable,
	.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
	.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.mode_valid = mtk_dsi_bridge_mode_valid,
	.mode_set = mtk_dsi_bridge_mode_set,
};

void mtk_dsi_ddp_start(struct device *dev)
{
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	mtk_dsi_poweron(dsi);
}

void mtk_dsi_ddp_stop(struct device *dev)
{
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	mtk_dsi_poweroff(dsi);
}

static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
{
	int ret;

	ret = drm_simple_encoder_init(drm, &dsi->encoder,
				      DRM_MODE_ENCODER_DSI);
	if (ret) {
		DRM_ERROR("Failed to initialize encoder\n");
		return ret;
	}

	dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);

	ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		goto err_cleanup_encoder;

	dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
	if (IS_ERR(dsi->connector)) {
		DRM_ERROR("Unable to create bridge connector\n");
		ret = PTR_ERR(dsi->connector);
		goto err_cleanup_encoder;
	}
	drm_connector_attach_encoder(dsi->connector, &dsi->encoder);

	return 0;

err_cleanup_encoder:
	drm_encoder_cleanup(&dsi->encoder);
	return ret;
}

unsigned int mtk_dsi_encoder_index(struct device *dev)
{
	struct mtk_dsi *dsi = dev_get_drvdata(dev);
	unsigned int encoder_index = drm_encoder_index(&dsi->encoder);

	dev_dbg(dev, "encoder index:%d\n", encoder_index);
	return encoder_index;
}

static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
	int ret;
	struct drm_device *drm = data;
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	ret = mtk_dsi_encoder_init(drm, dsi);
	if (ret)
		return ret;

	return device_reset_optional(dev);
}

static void mtk_dsi_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	drm_encoder_cleanup(&dsi->encoder);
}

static const struct component_ops mtk_dsi_component_ops = {
	.bind = mtk_dsi_bind,
	.unbind = mtk_dsi_unbind,
};

static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
			       struct mipi_dsi_device *device)
{
	struct mtk_dsi *dsi = host_to_dsi(host);
	struct device *dev = host->dev;
	int ret;

	dsi->lanes = device->lanes;
	dsi->format = device->format;
	dsi->mode_flags = device->mode_flags;
	dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(dsi->next_bridge))
		return PTR_ERR(dsi->next_bridge);

	drm_bridge_add(&dsi->bridge);

	ret = component_add(host->dev, &mtk_dsi_component_ops);
	if (ret) {
		DRM_ERROR("failed to add dsi_host component: %d\n", ret);
		drm_bridge_remove(&dsi->bridge);
		return ret;
	}

	return 0;
}

static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
			       struct mipi_dsi_device *device)
{
	struct mtk_dsi *dsi = host_to_dsi(host);

	component_del(host->dev, &mtk_dsi_component_ops);
	drm_bridge_remove(&dsi->bridge);
	return 0;
}

static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
				 4, 2000000);
	if (ret) {
		DRM_WARN("polling dsi wait not busy timeout!\n");

		mtk_dsi_enable(dsi);
		mtk_dsi_reset_engine(dsi);
	}
}

static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
{
	switch (type) {
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		return 1;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		return 2;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		return read_data[1] + read_data[2] * 16;
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		DRM_INFO("type is 0x02, try again\n");
		break;
	default:
		DRM_INFO("type(0x%x) not recognized\n", type);
		break;
	}

	return 0;
}
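
/*
 * Command queue entry layout, as programmed below: byte 0 holds the CONFIG
 * field (short/long packet, BTA for reads) and byte 1 the data ID (packet
 * type). For short packets the up-to-two payload bytes go into bytes 2-3 of
 * the first entry (cmdq_off = 2); for long packets bytes 2-3 carry the 16-bit
 * word count and the payload starts at byte offset 4 (cmdq_off = 4), spread
 * over the following entries. CMDQ_SIZE is the number of 32-bit entries used.
 */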
static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
{
	const char *tx_buf = msg->tx_buf;
	u8 config, cmdq_size, cmdq_off, type = msg->type;
	u32 reg_val, cmdq_mask, i;
	u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off;

	if (MTK_DSI_HOST_IS_READ(type))
		config = BTA;
	else
		config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;

	if (msg->tx_len > 2) {
		cmdq_size = 1 + (msg->tx_len + 3) / 4;
		cmdq_off = 4;
		cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
		reg_val = (msg->tx_len << 16) | (type << 8) | config;
	} else {
		cmdq_size = 1;
		cmdq_off = 2;
		cmdq_mask = CONFIG | DATA_ID;
		reg_val = (type << 8) | config;
	}

	for (i = 0; i < msg->tx_len; i++)
		mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U),
			     (0xffUL << (((i + cmdq_off) & 3U) * 8U)),
			     tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));

	mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val);
	mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
	if (dsi->driver_data->cmdq_long_packet_ctl) {
		/* Disable setting cmdq_size automatically for long packets */
		mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE_SEL, CMDQ_SIZE_SEL);
	}
}

static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
				     const struct mipi_dsi_msg *msg, u8 flag)
{
	mtk_dsi_wait_for_idle(dsi);
	mtk_dsi_irq_data_clear(dsi, flag);
	mtk_dsi_cmdq(dsi, msg);
	mtk_dsi_start(dsi);

	if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
		return -ETIME;
	else
		return 0;
}

static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
				     const struct mipi_dsi_msg *msg)
{
	struct mtk_dsi *dsi = host_to_dsi(host);
	u32 recv_cnt, i;
	u8 read_data[16];
	void *src_addr;
	u8 irq_flag = CMD_DONE_INT_FLAG;
	u32 dsi_mode;
	int ret;

	dsi_mode = readl(dsi->regs + DSI_MODE_CTRL);
	if (dsi_mode & MODE) {
		mtk_dsi_stop(dsi);
		ret = mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
		if (ret)
			goto restore_dsi_mode;
	}

	if (MTK_DSI_HOST_IS_READ(msg->type))
		irq_flag |= LPRX_RD_RDY_INT_FLAG;

	mtk_dsi_lane_ready(dsi);

	ret = mtk_dsi_host_send_cmd(dsi, msg, irq_flag);
	if (ret)
		goto restore_dsi_mode;

	if (!MTK_DSI_HOST_IS_READ(msg->type)) {
		recv_cnt = 0;
		goto restore_dsi_mode;
	}

	if (!msg->rx_buf) {
		DRM_ERROR("dsi receive buffer is NULL\n");
		ret = -EINVAL;
		goto restore_dsi_mode;
	}

	/* Read back the 16-byte RX FIFO (DSI_RX_DATA0..3) */
	for (i = 0; i < 16; i++)
		*(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);

	recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);

	if (recv_cnt > 2)
		src_addr = &read_data[4];
	else
		src_addr = &read_data[1];

	if (recv_cnt > 10)
		recv_cnt = 10;

	if (recv_cnt > msg->rx_len)
		recv_cnt = msg->rx_len;

	if (recv_cnt)
		memcpy(msg->rx_buf, src_addr, recv_cnt);

	DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
		 recv_cnt, *((u8 *)(msg->tx_buf)));

restore_dsi_mode:
	if (dsi_mode & MODE) {
		mtk_dsi_set_mode(dsi);
		mtk_dsi_start(dsi);
	}

	return ret < 0 ? ret : recv_cnt;
}

static const struct mipi_dsi_host_ops mtk_dsi_ops = {
	.attach = mtk_dsi_host_attach,
	.detach = mtk_dsi_host_detach,
	.transfer = mtk_dsi_host_transfer,
};

static int mtk_dsi_probe(struct platform_device *pdev)
{
	struct mtk_dsi *dsi;
	struct device *dev = &pdev->dev;
	struct resource *regs;
	int irq_num;
	int ret;

	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	dsi->host.ops = &mtk_dsi_ops;
	dsi->host.dev = dev;
	ret = mipi_dsi_host_register(&dsi->host);
	if (ret < 0) {
		dev_err(dev, "failed to register DSI host: %d\n", ret);
		return ret;
	}

	dsi->driver_data = of_device_get_match_data(dev);

	dsi->engine_clk = devm_clk_get(dev, "engine");
	if (IS_ERR(dsi->engine_clk)) {
		ret = PTR_ERR(dsi->engine_clk);

		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get engine clock: %d\n", ret);
		goto err_unregister_host;
	}

	dsi->digital_clk = devm_clk_get(dev, "digital");
	if (IS_ERR(dsi->digital_clk)) {
		ret = PTR_ERR(dsi->digital_clk);

		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get digital clock: %d\n", ret);
		goto err_unregister_host;
	}

	dsi->hs_clk = devm_clk_get(dev, "hs");
	if (IS_ERR(dsi->hs_clk)) {
		ret = PTR_ERR(dsi->hs_clk);
		dev_err(dev, "Failed to get hs clock: %d\n", ret);
		goto err_unregister_host;
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dsi->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(dsi->regs)) {
		ret = PTR_ERR(dsi->regs);
		dev_err(dev, "Failed to ioremap memory: %d\n", ret);
		goto err_unregister_host;
	}

	dsi->phy = devm_phy_get(dev, "dphy");
	if (IS_ERR(dsi->phy)) {
		ret = PTR_ERR(dsi->phy);
		dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
		goto err_unregister_host;
	}

	irq_num = platform_get_irq(pdev, 0);
	if (irq_num < 0) {
		ret = irq_num;
		goto err_unregister_host;
	}

	ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), dsi);
	if (ret) {
		dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
		goto err_unregister_host;
	}

	init_waitqueue_head(&dsi->irq_wait_queue);

	platform_set_drvdata(pdev, dsi);

	dsi->bridge.funcs = &mtk_dsi_bridge_funcs;
	dsi->bridge.of_node = dev->of_node;
	dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;

	return 0;

err_unregister_host:
	mipi_dsi_host_unregister(&dsi->host);
	return ret;
}

static void mtk_dsi_remove(struct platform_device *pdev)
{
	struct mtk_dsi *dsi = platform_get_drvdata(pdev);

	mtk_output_dsi_disable(dsi);
	mipi_dsi_host_unregister(&dsi->host);
}

static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
	.reg_cmdq_off = 0x200,
};

static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
	.reg_cmdq_off = 0x180,
};

static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
	.reg_cmdq_off = 0x200,
	.has_shadow_ctl = true,
	.has_size_ctl = true,
};

static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = {
	.reg_cmdq_off = 0xd00,
	.has_shadow_ctl = true,
	.has_size_ctl = true,
};

static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
	.reg_cmdq_off = 0xd00,
	.has_shadow_ctl = true,
	.has_size_ctl = true,
	.cmdq_long_packet_ctl = true,
};

static const struct of_device_id mtk_dsi_of_match[] = {
	{ .compatible = "mediatek,mt2701-dsi",
	  .data = &mt2701_dsi_driver_data },
	{ .compatible = "mediatek,mt8173-dsi",
	  .data = &mt8173_dsi_driver_data },
	{ .compatible = "mediatek,mt8183-dsi",
	  .data = &mt8183_dsi_driver_data },
	{ .compatible = "mediatek,mt8186-dsi",
	  .data = &mt8186_dsi_driver_data },
	{ .compatible = "mediatek,mt8188-dsi",
	  .data = &mt8188_dsi_driver_data },
	{ },
};
MODULE_DEVICE_TABLE(of, mtk_dsi_of_match);

struct platform_driver mtk_dsi_driver = {
	.probe = mtk_dsi_probe,
	.remove_new = mtk_dsi_remove,
	.driver = {
		.name = "mtk-dsi",
		.of_match_table = mtk_dsi_of_match,
	},
};