// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 */

#define DSS_SUBSYS_NAME "DSI"

#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/component.h>
#include <linux/sys_soc.h>

#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <video/mipi_display.h>

#include "omapdss.h"
#include "dss.h"

#define DSI_CATCH_MISSING_TE

#include "dsi.h"

#define REG_GET(dsi, idx, start, end) \
	FLD_GET(dsi_read_reg(dsi, idx), start, end)

#define REG_FLD_MOD(dsi, idx, val, start, end) \
	dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end))

static int dsi_init_dispc(struct dsi_data *dsi);
static void dsi_uninit_dispc(struct dsi_data *dsi);

static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel);

static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
				       const struct mipi_dsi_msg *msg);

#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif

/* Note: for some reason video mode seems to work only if VC_VIDEO is 0 */
#define VC_VIDEO	0
#define VC_CMD		1

#define drm_bridge_to_dsi(bridge) \
	container_of(bridge, struct dsi_data, bridge)

static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev)
{
	return dev_get_drvdata(dssdev->dev);
}

static inline struct dsi_data *host_to_omap(struct mipi_dsi_host *host)
{
	return container_of(host, struct dsi_data, host);
}

static inline void dsi_write_reg(struct dsi_data *dsi,
				 const struct dsi_reg idx, u32 val)
{
	void __iomem *base;

	switch (idx.module) {
	case DSI_PROTO: base = dsi->proto_base; break;
	case DSI_PHY: base = dsi->phy_base; break;
	case DSI_PLL: base = dsi->pll_base; break;
	default: return;
	}

	__raw_writel(val, base + idx.idx);
}

static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
{
	void __iomem *base;

	switch (idx.module) {
	case DSI_PROTO: base = dsi->proto_base; break;
	case DSI_PHY: base = dsi->phy_base; break;
	case DSI_PLL: base = dsi->pll_base; break;
	default: return 0;
	}

	return __raw_readl(base + idx.idx);
}

static void dsi_bus_lock(struct dsi_data *dsi)
{
	down(&dsi->bus_lock);
}

static void dsi_bus_unlock(struct dsi_data *dsi)
{
	up(&dsi->bus_lock);
}

static bool dsi_bus_is_locked(struct dsi_data *dsi)
{
	return dsi->bus_lock.count == 0;
}

static void dsi_completion_handler(void *data, u32 mask)
{
	complete((struct completion *)data);
}

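/*
 * Poll a single register bit until it reaches the requested value: a short
 * busy-loop (100 reads) catches bits that flip almost immediately, after
 * which we sleep in 1 ms hrtimer slices for up to 500 ms before giving up.
 */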
static inline bool wait_for_bit_change(struct dsi_data *dsi,
				       const struct dsi_reg idx,
				       int bitnum, int value)
{
	unsigned long timeout;
	ktime_t wait;
	int t;

	/* first busyloop to see if the bit changes right away */
	t = 100;
	while (t-- > 0) {
		if (REG_GET(dsi, idx, bitnum, bitnum) == value)
			return true;
	}

	/* then loop for 500ms, sleeping for 1ms in between */
	timeout = jiffies + msecs_to_jiffies(500);
	while (time_before(jiffies, timeout)) {
		if (REG_GET(dsi, idx, bitnum, bitnum) == value)
			return true;

		wait = ns_to_ktime(1000 * 1000);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
	}

	return false;
}

#ifdef DSI_PERF_MEASURE
static void dsi_perf_mark_setup(struct dsi_data *dsi)
{
	dsi->perf_setup_time = ktime_get();
}

static void dsi_perf_mark_start(struct dsi_data *dsi)
{
	dsi->perf_start_time = ktime_get();
}

static void dsi_perf_show(struct dsi_data *dsi, const char *name)
{
	ktime_t t, setup_time, trans_time;
	u32 total_bytes;
	u32 setup_us, trans_us, total_us;

	if (!dsi_perf)
		return;

	t = ktime_get();

	setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
	setup_us = (u32)ktime_to_us(setup_time);
	if (setup_us == 0)
		setup_us = 1;

	trans_time = ktime_sub(t, dsi->perf_start_time);
	trans_us = (u32)ktime_to_us(trans_time);
	if (trans_us == 0)
		trans_us = 1;

	total_us = setup_us + trans_us;

	total_bytes = dsi->update_bytes;

	pr_info("DSI(%s): %u us + %u us = %u us (%uHz), %u bytes, %u kbytes/sec\n",
		name,
		setup_us,
		trans_us,
		total_us,
		1000 * 1000 / total_us,
		total_bytes,
		total_bytes * 1000 / total_us);
}
#else
static inline void dsi_perf_mark_setup(struct dsi_data *dsi)
{
}

static inline void dsi_perf_mark_start(struct dsi_data *dsi)
{
}

static inline void dsi_perf_show(struct dsi_data *dsi, const char *name)
{
}
#endif

static int verbose_irq;

static void print_irq_status(u32 status)
{
	if (status == 0)
		return;

	if (!verbose_irq && (status & ~DSI_IRQ_CHANNEL_MASK) == 0)
		return;

#define PIS(x) (status & DSI_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI IRQ: 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		status,
		verbose_irq ? PIS(VC0) : "",
		verbose_irq ? PIS(VC1) : "",
		verbose_irq ? PIS(VC2) : "",
		verbose_irq ? PIS(VC3) : "",
		PIS(WAKEUP),
		PIS(RESYNC),
		PIS(PLL_LOCK),
		PIS(PLL_UNLOCK),
		PIS(PLL_RECALL),
		PIS(COMPLEXIO_ERR),
		PIS(HS_TX_TIMEOUT),
		PIS(LP_RX_TIMEOUT),
		PIS(TE_TRIGGER),
		PIS(ACK_TRIGGER),
		PIS(SYNC_LOST),
		PIS(LDO_POWER_GOOD),
		PIS(TA_TIMEOUT));
#undef PIS
}

static void print_irq_status_vc(int vc, u32 status)
{
	if (status == 0)
		return;

	if (!verbose_irq && (status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
		return;

#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
		vc,
		status,
		PIS(CS),
		PIS(ECC_CORR),
		PIS(ECC_NO_CORR),
		verbose_irq ? PIS(PACKET_SENT) : "",
		PIS(BTA),
		PIS(FIFO_TX_OVF),
		PIS(FIFO_RX_OVF),
		PIS(FIFO_TX_UDF),
		PIS(PP_BUSY_CHANGE));
#undef PIS
}

static void print_irq_status_cio(u32 status)
{
	if (status == 0)
		return;

#define PIS(x) (status & DSI_CIO_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI CIO IRQ 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		status,
		PIS(ERRSYNCESC1),
		PIS(ERRSYNCESC2),
		PIS(ERRSYNCESC3),
		PIS(ERRESC1),
		PIS(ERRESC2),
		PIS(ERRESC3),
		PIS(ERRCONTROL1),
		PIS(ERRCONTROL2),
		PIS(ERRCONTROL3),
		PIS(STATEULPS1),
		PIS(STATEULPS2),
		PIS(STATEULPS3),
		PIS(ERRCONTENTIONLP0_1),
		PIS(ERRCONTENTIONLP1_1),
		PIS(ERRCONTENTIONLP0_2),
		PIS(ERRCONTENTIONLP1_2),
		PIS(ERRCONTENTIONLP0_3),
		PIS(ERRCONTENTIONLP1_3),
		PIS(ULPSACTIVENOT_ALL0),
		PIS(ULPSACTIVENOT_ALL1));
#undef PIS
}

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static void dsi_collect_irq_stats(struct dsi_data *dsi, u32 irqstatus,
				  u32 *vcstatus, u32 ciostatus)
{
	int i;

	spin_lock(&dsi->irq_stats_lock);

	dsi->irq_stats.irq_count++;
	dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);

	for (i = 0; i < 4; ++i)
		dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);

	dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);

	spin_unlock(&dsi->irq_stats_lock);
}
#else
#define dsi_collect_irq_stats(dsi, irqstatus, vcstatus, ciostatus)
#endif

static int debug_irq;

static void dsi_handle_irq_errors(struct dsi_data *dsi, u32 irqstatus,
				  u32 *vcstatus, u32 ciostatus)
{
	int i;

	if (irqstatus & DSI_IRQ_ERROR_MASK) {
		DSSERR("DSI error, irqstatus %x\n", irqstatus);
		print_irq_status(irqstatus);
		spin_lock(&dsi->errors_lock);
		dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
		spin_unlock(&dsi->errors_lock);
	} else if (debug_irq) {
		print_irq_status(irqstatus);
	}

	for (i = 0; i < 4; ++i) {
		if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
			DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
				i, vcstatus[i]);
			print_irq_status_vc(i, vcstatus[i]);
		} else if (debug_irq) {
			print_irq_status_vc(i, vcstatus[i]);
		}
	}

	if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
		DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
		print_irq_status_cio(ciostatus);
	} else if (debug_irq) {
		print_irq_status_cio(ciostatus);
	}
}

static void dsi_call_isrs(struct dsi_isr_data *isr_array,
		unsigned int isr_array_size, u32 irqstatus)
{
	struct dsi_isr_data *isr_data;
	int i;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];
		if (isr_data->isr && isr_data->mask & irqstatus)
			isr_data->isr(isr_data->arg, irqstatus);
	}
}

static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
		u32 irqstatus, u32 *vcstatus, u32 ciostatus)
{
	int i;

	dsi_call_isrs(isr_tables->isr_table,
			ARRAY_SIZE(isr_tables->isr_table),
			irqstatus);

	for (i = 0; i < 4; ++i) {
		if (vcstatus[i] == 0)
			continue;
		dsi_call_isrs(isr_tables->isr_table_vc[i],
				ARRAY_SIZE(isr_tables->isr_table_vc[i]),
				vcstatus[i]);
	}

	if (ciostatus != 0)
		dsi_call_isrs(isr_tables->isr_table_cio,
				ARRAY_SIZE(isr_tables->isr_table_cio),
				ciostatus);
}

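/*
 * Top-level interrupt handler: read and acknowledge the main, per-VC and
 * ComplexIO status registers (re-reading after each write to flush the
 * posted write), then dispatch to the registered ISRs from a snapshot of
 * the tables, taken under irq_lock, so that an ISR may safely unregister
 * itself while the handler runs.
 */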
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
	struct dsi_data *dsi = arg;
	u32 irqstatus, vcstatus[4], ciostatus;
	int i;

	if (!dsi->is_enabled)
		return IRQ_NONE;

	spin_lock(&dsi->irq_lock);

	irqstatus = dsi_read_reg(dsi, DSI_IRQSTATUS);

	/* IRQ is not for us */
	if (!irqstatus) {
		spin_unlock(&dsi->irq_lock);
		return IRQ_NONE;
	}

	dsi_write_reg(dsi, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
	/* flush posted write */
	dsi_read_reg(dsi, DSI_IRQSTATUS);

	for (i = 0; i < 4; ++i) {
		if ((irqstatus & (1 << i)) == 0) {
			vcstatus[i] = 0;
			continue;
		}

		vcstatus[i] = dsi_read_reg(dsi, DSI_VC_IRQSTATUS(i));

		dsi_write_reg(dsi, DSI_VC_IRQSTATUS(i), vcstatus[i]);
		/* flush posted write */
		dsi_read_reg(dsi, DSI_VC_IRQSTATUS(i));
	}

	if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
		ciostatus = dsi_read_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS);

		dsi_write_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
		/* flush posted write */
		dsi_read_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS);
	} else {
		ciostatus = 0;
	}

#ifdef DSI_CATCH_MISSING_TE
	if (irqstatus & DSI_IRQ_TE_TRIGGER)
		del_timer(&dsi->te_timer);
#endif

	/* make a copy and unlock, so that isrs can unregister
	 * themselves */
	memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
	       sizeof(dsi->isr_tables));

	spin_unlock(&dsi->irq_lock);

	dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);

	dsi_handle_irq_errors(dsi, irqstatus, vcstatus, ciostatus);

	dsi_collect_irq_stats(dsi, irqstatus, vcstatus, ciostatus);

	return IRQ_HANDLED;
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_configure_irqs(struct dsi_data *dsi,
				     struct dsi_isr_data *isr_array,
				     unsigned int isr_array_size,
				     u32 default_mask,
				     const struct dsi_reg enable_reg,
				     const struct dsi_reg status_reg)
{
	struct dsi_isr_data *isr_data;
	u32 mask;
	u32 old_mask;
	int i;

	mask = default_mask;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == NULL)
			continue;

		mask |= isr_data->mask;
	}

	old_mask = dsi_read_reg(dsi, enable_reg);
	/* clear the irqstatus for newly enabled irqs */
	dsi_write_reg(dsi, status_reg, (mask ^ old_mask) & mask);
	dsi_write_reg(dsi, enable_reg, mask);

	/* flush posted writes */
	dsi_read_reg(dsi, enable_reg);
	dsi_read_reg(dsi, status_reg);
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs(struct dsi_data *dsi)
{
	u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
	mask |= DSI_IRQ_TE_TRIGGER;
#endif
	_omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
			DSI_IRQENABLE, DSI_IRQSTATUS);
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_vc(struct dsi_data *dsi, int vc)
{
	_omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table_vc[vc],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
			DSI_VC_IRQ_ERROR_MASK,
			DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_cio(struct dsi_data *dsi)
{
	_omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
			DSI_CIO_IRQ_ERROR_MASK,
			DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
}

static void _dsi_initialize_irq(struct dsi_data *dsi)
{
	unsigned long flags;
	int vc;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));

	_omap_dsi_set_irqs(dsi);
	for (vc = 0; vc < 4; ++vc)
		_omap_dsi_set_irqs_vc(dsi, vc);
	_omap_dsi_set_irqs_cio(dsi);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);
}

static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
		struct dsi_isr_data *isr_array, unsigned int isr_array_size)
{
	struct dsi_isr_data *isr_data;
	int free_idx;
	int i;

	BUG_ON(isr == NULL);

	/* check for duplicate entry and find a free slot */
	free_idx = -1;
	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == isr && isr_data->arg == arg &&
				isr_data->mask == mask) {
			return -EINVAL;
		}

		if (isr_data->isr == NULL && free_idx == -1)
			free_idx = i;
	}

	if (free_idx == -1)
		return -EBUSY;

	isr_data = &isr_array[free_idx];
	isr_data->isr = isr;
	isr_data->arg = arg;
	isr_data->mask = mask;

	return 0;
}

static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
		struct dsi_isr_data *isr_array, unsigned int isr_array_size)
{
	struct dsi_isr_data *isr_data;
	int i;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];
		if (isr_data->isr != isr || isr_data->arg != arg ||
				isr_data->mask != mask)
			continue;

		isr_data->isr = NULL;
		isr_data->arg = NULL;
		isr_data->mask = 0;

		return 0;
	}

	return -EINVAL;
}

static int dsi_register_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
			    void *arg, u32 mask)
{
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table));

	if (r == 0)
		_omap_dsi_set_irqs(dsi);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
			      void *arg, u32 mask)
{
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table));

	if (r == 0)
		_omap_dsi_set_irqs(dsi);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_register_isr_vc(struct dsi_data *dsi, int vc,
			       omap_dsi_isr_t isr, void *arg, u32 mask)
{
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask,
			dsi->isr_tables.isr_table_vc[vc],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));

	if (r == 0)
		_omap_dsi_set_irqs_vc(dsi, vc);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr_vc(struct dsi_data *dsi, int vc,
				 omap_dsi_isr_t isr, void *arg, u32 mask)
{
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask,
			dsi->isr_tables.isr_table_vc[vc],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));

	if (r == 0)
		_omap_dsi_set_irqs_vc(dsi, vc);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static u32 dsi_get_errors(struct dsi_data *dsi)
{
	unsigned long flags;
	u32 e;

	spin_lock_irqsave(&dsi->errors_lock, flags);
	e = dsi->errors;
	dsi->errors = 0;
	spin_unlock_irqrestore(&dsi->errors_lock, flags);
	return e;
}

static int dsi_runtime_get(struct dsi_data *dsi)
{
	int r;

	DSSDBG("dsi_runtime_get\n");

	r = pm_runtime_get_sync(dsi->dev);
	if (WARN_ON(r < 0)) {
		pm_runtime_put_noidle(dsi->dev);
		return r;
	}
	return 0;
}

static void dsi_runtime_put(struct dsi_data *dsi)
{
	int r;

	DSSDBG("dsi_runtime_put\n");

	r = pm_runtime_put_sync(dsi->dev);
	WARN_ON(r < 0 && r != -ENOSYS);
}

static void _dsi_print_reset_status(struct dsi_data *dsi)
{
	int b0, b1, b2;

	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
	dsi_read_reg(dsi, DSI_DSIPHY_CFG5);

	if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC) {
		b0 = 28;
		b1 = 27;
		b2 = 26;
	} else {
		b0 = 24;
		b1 = 25;
		b2 = 26;
	}

#define DSI_FLD_GET(fld, start, end)\
	FLD_GET(dsi_read_reg(dsi, DSI_##fld), start, end)

	pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
		DSI_FLD_GET(PLL_STATUS, 0, 0),
		DSI_FLD_GET(COMPLEXIO_CFG1, 29, 29),
		DSI_FLD_GET(DSIPHY_CFG5, b0, b0),
		DSI_FLD_GET(DSIPHY_CFG5, b1, b1),
		DSI_FLD_GET(DSIPHY_CFG5, b2, b2),
		DSI_FLD_GET(DSIPHY_CFG5, 29, 29),
		DSI_FLD_GET(DSIPHY_CFG5, 30, 30),
		DSI_FLD_GET(DSIPHY_CFG5, 31, 31));

#undef DSI_FLD_GET
}

static inline int dsi_if_enable(struct dsi_data *dsi, bool enable)
{
	DSSDBG("dsi_if_enable(%d)\n", enable);

	enable = enable ? 1 : 0;
	REG_FLD_MOD(dsi, DSI_CTRL, enable, 0, 0); /* IF_EN */

	if (!wait_for_bit_change(dsi, DSI_CTRL, 0, enable)) {
		DSSERR("Failed to set dsi_if_enable to %d\n", enable);
		return -EIO;
	}

	return 0;
}

static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct dsi_data *dsi)
{
	return dsi->pll.cinfo.clkout[HSDIV_DISPC];
}

static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct dsi_data *dsi)
{
	return dsi->pll.cinfo.clkout[HSDIV_DSI];
}

static unsigned long dsi_get_txbyteclkhs(struct dsi_data *dsi)
{
	return dsi->pll.cinfo.clkdco / 16;
}

static unsigned long dsi_fclk_rate(struct dsi_data *dsi)
{
	unsigned long r;
	enum dss_clk_source source;

	source = dss_get_dsi_clk_source(dsi->dss, dsi->module_id);
	if (source == DSS_CLK_SRC_FCK) {
		/* DSI FCLK source is DSS_CLK_FCK */
		r = clk_get_rate(dsi->dss_clk);
	} else {
		/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
		r = dsi_get_pll_hsdiv_dsi_rate(dsi);
	}

	return r;
}

static int dsi_lp_clock_calc(unsigned long dsi_fclk,
		unsigned long lp_clk_min, unsigned long lp_clk_max,
		struct dsi_lp_clock_info *lp_cinfo)
{
	unsigned int lp_clk_div;
	unsigned long lp_clk;

	lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2);
	lp_clk = dsi_fclk / 2 / lp_clk_div;

	if (lp_clk < lp_clk_min || lp_clk > lp_clk_max)
		return -EINVAL;

	lp_cinfo->lp_clk_div = lp_clk_div;
	lp_cinfo->lp_clk = lp_clk;

	return 0;
}

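/*
 * Worked example for the calculation above (values illustrative only):
 * with dsi_fclk = 173 MHz and lp_clk_max = 10 MHz, lp_clk_div =
 * DIV_ROUND_UP(173 MHz, 2 * 10 MHz) = 9 and lp_clk = 173 MHz / 2 / 9
 * ~= 9.6 MHz, i.e. the highest LP clock that stays within the limit.
 */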
static int dsi_set_lp_clk_divisor(struct dsi_data *dsi)
{
	unsigned long dsi_fclk;
	unsigned int lp_clk_div;
	unsigned long lp_clk;
	unsigned int lpdiv_max = dsi->data->max_pll_lpdiv;

	lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;

	if (lp_clk_div == 0 || lp_clk_div > lpdiv_max)
		return -EINVAL;

	dsi_fclk = dsi_fclk_rate(dsi);

	lp_clk = dsi_fclk / 2 / lp_clk_div;

	DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
	dsi->current_lp_cinfo.lp_clk = lp_clk;
	dsi->current_lp_cinfo.lp_clk_div = lp_clk_div;

	/* LP_CLK_DIVISOR */
	REG_FLD_MOD(dsi, DSI_CLK_CTRL, lp_clk_div, 12, 0);

	/* LP_RX_SYNCHRO_ENABLE */
	REG_FLD_MOD(dsi, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);

	return 0;
}

static void dsi_enable_scp_clk(struct dsi_data *dsi)
{
	if (dsi->scp_clk_refcount++ == 0)
		REG_FLD_MOD(dsi, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
}

static void dsi_disable_scp_clk(struct dsi_data *dsi)
{
	WARN_ON(dsi->scp_clk_refcount == 0);
	if (--dsi->scp_clk_refcount == 0)
		REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
}

enum dsi_pll_power_state {
	DSI_PLL_POWER_OFF	= 0x0,
	DSI_PLL_POWER_ON_HSCLK	= 0x1,
	DSI_PLL_POWER_ON_ALL	= 0x2,
	DSI_PLL_POWER_ON_DIV	= 0x3,
};

static int dsi_pll_power(struct dsi_data *dsi, enum dsi_pll_power_state state)
{
	int t = 0;

	/* DSI-PLL power command 0x3 is not working */
	if ((dsi->data->quirks & DSI_QUIRK_PLL_PWR_BUG) &&
	    state == DSI_PLL_POWER_ON_DIV)
		state = DSI_PLL_POWER_ON_ALL;

	/* PLL_PWR_CMD */
	REG_FLD_MOD(dsi, DSI_CLK_CTRL, state, 31, 30);

	/* PLL_PWR_STATUS */
	while (FLD_GET(dsi_read_reg(dsi, DSI_CLK_CTRL), 29, 28) != state) {
		if (++t > 1000) {
			DSSERR("Failed to set DSI PLL power mode to %d\n",
				state);
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}

static void dsi_pll_calc_dsi_fck(struct dsi_data *dsi,
				 struct dss_pll_clock_info *cinfo)
{
	unsigned long max_dsi_fck;

	max_dsi_fck = dsi->data->max_fck_freq;

	cinfo->mX[HSDIV_DSI] = DIV_ROUND_UP(cinfo->clkdco, max_dsi_fck);
	cinfo->clkout[HSDIV_DSI] = cinfo->clkdco / cinfo->mX[HSDIV_DSI];
}

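/*
 * Worked example for the HSDIV divider above (values illustrative only):
 * with clkdco = 1800 MHz and max_fck_freq = 173 MHz, mX[HSDIV_DSI] =
 * DIV_ROUND_UP(1800, 173) = 11, giving clkout[HSDIV_DSI] = 1800 MHz / 11
 * ~= 163.6 MHz, the fastest HSDIV output that does not exceed the limit.
 */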
static int dsi_pll_enable(struct dss_pll *pll)
{
	struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
	int r = 0;

	DSSDBG("PLL init\n");

	r = dsi_runtime_get(dsi);
	if (r)
		return r;

	/*
	 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
	 */
	dsi_enable_scp_clk(dsi);

	r = regulator_enable(dsi->vdds_dsi_reg);
	if (r)
		goto err0;

	/* XXX PLL does not come out of reset without this... */
	dispc_pck_free_enable(dsi->dss->dispc, 1);

	if (!wait_for_bit_change(dsi, DSI_PLL_STATUS, 0, 1)) {
		DSSERR("PLL not coming out of reset.\n");
		r = -ENODEV;
		dispc_pck_free_enable(dsi->dss->dispc, 0);
		goto err1;
	}

	/* XXX ... but if left on, we get problems when planes do not
	 * fill the whole display. No idea about this */
	dispc_pck_free_enable(dsi->dss->dispc, 0);

	r = dsi_pll_power(dsi, DSI_PLL_POWER_ON_ALL);

	if (r)
		goto err1;

	DSSDBG("PLL init done\n");

	return 0;
err1:
	regulator_disable(dsi->vdds_dsi_reg);
err0:
	dsi_disable_scp_clk(dsi);
	dsi_runtime_put(dsi);
	return r;
}

static void dsi_pll_disable(struct dss_pll *pll)
{
	struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);

	dsi_pll_power(dsi, DSI_PLL_POWER_OFF);

	regulator_disable(dsi->vdds_dsi_reg);

	dsi_disable_scp_clk(dsi);
	dsi_runtime_put(dsi);

	DSSDBG("PLL disable done\n");
}

static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
{
	struct dsi_data *dsi = s->private;
	struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
	enum dss_clk_source dispc_clk_src, dsi_clk_src;
	int dsi_module = dsi->module_id;
	struct dss_pll *pll = &dsi->pll;

	dispc_clk_src = dss_get_dispc_clk_source(dsi->dss);
	dsi_clk_src = dss_get_dsi_clk_source(dsi->dss, dsi_module);

	if (dsi_runtime_get(dsi))
		return 0;

	seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);

	seq_printf(s, "dsi pll clkin\t%lu\n", clk_get_rate(pll->clkin));

	seq_printf(s, "Fint\t\t%-16lun %u\n", cinfo->fint, cinfo->n);

	seq_printf(s, "CLKIN4DDR\t%-16lum %u\n",
			cinfo->clkdco, cinfo->m);

	seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
			dss_get_clk_source_name(dsi_module == 0 ?
				DSS_CLK_SRC_PLL1_1 :
				DSS_CLK_SRC_PLL2_1),
			cinfo->clkout[HSDIV_DISPC],
			cinfo->mX[HSDIV_DISPC],
			dispc_clk_src == DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
			dss_get_clk_source_name(dsi_module == 0 ?
				DSS_CLK_SRC_PLL1_2 :
				DSS_CLK_SRC_PLL2_2),
			cinfo->clkout[HSDIV_DSI],
			cinfo->mX[HSDIV_DSI],
			dsi_clk_src == DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s, "- DSI%d -\n", dsi_module + 1);

	seq_printf(s, "dsi fclk source = %s\n",
			dss_get_clk_source_name(dsi_clk_src));

	seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsi));

	seq_printf(s, "DDR_CLK\t\t%lu\n",
			cinfo->clkdco / 4);

	seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsi));

	seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);

	dsi_runtime_put(dsi);

	return 0;
}

1016 "off" : "on"); 1017 1018 seq_printf(s, "- DSI%d -\n", dsi_module + 1); 1019 1020 seq_printf(s, "dsi fclk source = %s\n", 1021 dss_get_clk_source_name(dsi_clk_src)); 1022 1023 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsi)); 1024 1025 seq_printf(s, "DDR_CLK\t\t%lu\n", 1026 cinfo->clkdco / 4); 1027 1028 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsi)); 1029 1030 seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk); 1031 1032 dsi_runtime_put(dsi); 1033 1034 return 0; 1035 } 1036 1037 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 1038 static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) 1039 { 1040 struct dsi_data *dsi = s->private; 1041 unsigned long flags; 1042 struct dsi_irq_stats *stats; 1043 1044 stats = kmalloc(sizeof(*stats), GFP_KERNEL); 1045 if (!stats) 1046 return -ENOMEM; 1047 1048 spin_lock_irqsave(&dsi->irq_stats_lock, flags); 1049 1050 *stats = dsi->irq_stats; 1051 memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats)); 1052 dsi->irq_stats.last_reset = jiffies; 1053 1054 spin_unlock_irqrestore(&dsi->irq_stats_lock, flags); 1055 1056 seq_printf(s, "period %u ms\n", 1057 jiffies_to_msecs(jiffies - stats->last_reset)); 1058 1059 seq_printf(s, "irqs %d\n", stats->irq_count); 1060 #define PIS(x) \ 1061 seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1]); 1062 1063 seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1); 1064 PIS(VC0); 1065 PIS(VC1); 1066 PIS(VC2); 1067 PIS(VC3); 1068 PIS(WAKEUP); 1069 PIS(RESYNC); 1070 PIS(PLL_LOCK); 1071 PIS(PLL_UNLOCK); 1072 PIS(PLL_RECALL); 1073 PIS(COMPLEXIO_ERR); 1074 PIS(HS_TX_TIMEOUT); 1075 PIS(LP_RX_TIMEOUT); 1076 PIS(TE_TRIGGER); 1077 PIS(ACK_TRIGGER); 1078 PIS(SYNC_LOST); 1079 PIS(LDO_POWER_GOOD); 1080 PIS(TA_TIMEOUT); 1081 #undef PIS 1082 1083 #define PIS(x) \ 1084 seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \ 1085 stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \ 1086 stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \ 1087 stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \ 1088 stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]); 1089 1090 seq_printf(s, "-- VC interrupts --\n"); 1091 PIS(CS); 1092 PIS(ECC_CORR); 1093 PIS(PACKET_SENT); 1094 PIS(FIFO_TX_OVF); 1095 PIS(FIFO_RX_OVF); 1096 PIS(BTA); 1097 PIS(ECC_NO_CORR); 1098 PIS(FIFO_TX_UDF); 1099 PIS(PP_BUSY_CHANGE); 1100 #undef PIS 1101 1102 #define PIS(x) \ 1103 seq_printf(s, "%-20s %10d\n", #x, \ 1104 stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]); 1105 1106 seq_printf(s, "-- CIO interrupts --\n"); 1107 PIS(ERRSYNCESC1); 1108 PIS(ERRSYNCESC2); 1109 PIS(ERRSYNCESC3); 1110 PIS(ERRESC1); 1111 PIS(ERRESC2); 1112 PIS(ERRESC3); 1113 PIS(ERRCONTROL1); 1114 PIS(ERRCONTROL2); 1115 PIS(ERRCONTROL3); 1116 PIS(STATEULPS1); 1117 PIS(STATEULPS2); 1118 PIS(STATEULPS3); 1119 PIS(ERRCONTENTIONLP0_1); 1120 PIS(ERRCONTENTIONLP1_1); 1121 PIS(ERRCONTENTIONLP0_2); 1122 PIS(ERRCONTENTIONLP1_2); 1123 PIS(ERRCONTENTIONLP0_3); 1124 PIS(ERRCONTENTIONLP1_3); 1125 PIS(ULPSACTIVENOT_ALL0); 1126 PIS(ULPSACTIVENOT_ALL1); 1127 #undef PIS 1128 1129 kfree(stats); 1130 1131 return 0; 1132 } 1133 #endif 1134 1135 static int dsi_dump_dsi_regs(struct seq_file *s, void *p) 1136 { 1137 struct dsi_data *dsi = s->private; 1138 1139 if (dsi_runtime_get(dsi)) 1140 return 0; 1141 dsi_enable_scp_clk(dsi); 1142 1143 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r)) 1144 DUMPREG(DSI_REVISION); 1145 DUMPREG(DSI_SYSCONFIG); 1146 DUMPREG(DSI_SYSSTATUS); 1147 DUMPREG(DSI_IRQSTATUS); 1148 DUMPREG(DSI_IRQENABLE); 1149 DUMPREG(DSI_CTRL); 1150 DUMPREG(DSI_COMPLEXIO_CFG1); 1151 
static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
{
	struct dsi_data *dsi = s->private;

	if (dsi_runtime_get(dsi))
		return 0;
	dsi_enable_scp_clk(dsi);

#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r))
	DUMPREG(DSI_REVISION);
	DUMPREG(DSI_SYSCONFIG);
	DUMPREG(DSI_SYSSTATUS);
	DUMPREG(DSI_IRQSTATUS);
	DUMPREG(DSI_IRQENABLE);
	DUMPREG(DSI_CTRL);
	DUMPREG(DSI_COMPLEXIO_CFG1);
	DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
	DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
	DUMPREG(DSI_CLK_CTRL);
	DUMPREG(DSI_TIMING1);
	DUMPREG(DSI_TIMING2);
	DUMPREG(DSI_VM_TIMING1);
	DUMPREG(DSI_VM_TIMING2);
	DUMPREG(DSI_VM_TIMING3);
	DUMPREG(DSI_CLK_TIMING);
	DUMPREG(DSI_TX_FIFO_VC_SIZE);
	DUMPREG(DSI_RX_FIFO_VC_SIZE);
	DUMPREG(DSI_COMPLEXIO_CFG2);
	DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
	DUMPREG(DSI_VM_TIMING4);
	DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
	DUMPREG(DSI_VM_TIMING5);
	DUMPREG(DSI_VM_TIMING6);
	DUMPREG(DSI_VM_TIMING7);
	DUMPREG(DSI_STOPCLK_TIMING);

	DUMPREG(DSI_VC_CTRL(0));
	DUMPREG(DSI_VC_TE(0));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
	DUMPREG(DSI_VC_IRQSTATUS(0));
	DUMPREG(DSI_VC_IRQENABLE(0));

	DUMPREG(DSI_VC_CTRL(1));
	DUMPREG(DSI_VC_TE(1));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
	DUMPREG(DSI_VC_IRQSTATUS(1));
	DUMPREG(DSI_VC_IRQENABLE(1));

	DUMPREG(DSI_VC_CTRL(2));
	DUMPREG(DSI_VC_TE(2));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
	DUMPREG(DSI_VC_IRQSTATUS(2));
	DUMPREG(DSI_VC_IRQENABLE(2));

	DUMPREG(DSI_VC_CTRL(3));
	DUMPREG(DSI_VC_TE(3));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
	DUMPREG(DSI_VC_IRQSTATUS(3));
	DUMPREG(DSI_VC_IRQENABLE(3));

	DUMPREG(DSI_DSIPHY_CFG0);
	DUMPREG(DSI_DSIPHY_CFG1);
	DUMPREG(DSI_DSIPHY_CFG2);
	DUMPREG(DSI_DSIPHY_CFG5);

	DUMPREG(DSI_PLL_CONTROL);
	DUMPREG(DSI_PLL_STATUS);
	DUMPREG(DSI_PLL_GO);
	DUMPREG(DSI_PLL_CONFIGURATION1);
	DUMPREG(DSI_PLL_CONFIGURATION2);
#undef DUMPREG

	dsi_disable_scp_clk(dsi);
	dsi_runtime_put(dsi);

	return 0;
}

enum dsi_cio_power_state {
	DSI_COMPLEXIO_POWER_OFF		= 0x0,
	DSI_COMPLEXIO_POWER_ON		= 0x1,
	DSI_COMPLEXIO_POWER_ULPS	= 0x2,
};

static int dsi_cio_power(struct dsi_data *dsi, enum dsi_cio_power_state state)
{
	int t = 0;

	/* PWR_CMD */
	REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG1, state, 28, 27);

	/* PWR_STATUS */
	while (FLD_GET(dsi_read_reg(dsi, DSI_COMPLEXIO_CFG1),
			26, 25) != state) {
		if (++t > 1000) {
			DSSERR("failed to set complexio power state to %d\n",
				state);
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}

static unsigned int dsi_get_line_buf_size(struct dsi_data *dsi)
{
	int val;

	/* line buffer on OMAP3 is 1024 x 24bits */
	/* XXX: for some reason using full buffer size causes
	 * considerable TX slowdown with update sizes that fill the
	 * whole buffer */
	if (!(dsi->data->quirks & DSI_QUIRK_GNQ))
		return 1023 * 3;

	val = REG_GET(dsi, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */

	switch (val) {
	case 1:
		return 512 * 3;		/* 512x24 bits */
	case 2:
		return 682 * 3;		/* 682x24 bits */
	case 3:
		return 853 * 3;		/* 853x24 bits */
	case 4:
		return 1024 * 3;	/* 1024x24 bits */
	case 5:
		return 1194 * 3;	/* 1194x24 bits */
	case 6:
		return 1365 * 3;	/* 1365x24 bits */
	case 7:
		return 1920 * 3;	/* 1920x24 bits */
	default:
		BUG();
		return 0;
	}
}

static int dsi_set_lane_config(struct dsi_data *dsi)
{
	static const u8 offsets[] = { 0, 4, 8, 12, 16 };
	static const enum dsi_lane_function functions[] = {
		DSI_LANE_CLK,
		DSI_LANE_DATA1,
		DSI_LANE_DATA2,
		DSI_LANE_DATA3,
		DSI_LANE_DATA4,
	};
	u32 r;
	int i;

	r = dsi_read_reg(dsi, DSI_COMPLEXIO_CFG1);

	for (i = 0; i < dsi->num_lanes_used; ++i) {
		unsigned int offset = offsets[i];
		unsigned int polarity, lane_number;
		unsigned int t;

		for (t = 0; t < dsi->num_lanes_supported; ++t)
			if (dsi->lanes[t].function == functions[i])
				break;

		if (t == dsi->num_lanes_supported)
			return -EINVAL;

		lane_number = t;
		polarity = dsi->lanes[t].polarity;

		r = FLD_MOD(r, lane_number + 1, offset + 2, offset);
		r = FLD_MOD(r, polarity, offset + 3, offset + 3);
	}

	/* clear the unused lanes */
	for (; i < dsi->num_lanes_supported; ++i) {
		unsigned int offset = offsets[i];

		r = FLD_MOD(r, 0, offset + 2, offset);
		r = FLD_MOD(r, 0, offset + 3, offset + 3);
	}

	dsi_write_reg(dsi, DSI_COMPLEXIO_CFG1, r);

	return 0;
}

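/*
 * The two helpers below convert between nanoseconds and DDR clock ticks,
 * where ddr_clk = clkdco / 4. Worked example (values illustrative only):
 * with clkdco = 480 MHz, ddr_clk = 120 MHz, so ns2ddr(70) =
 * (70 * 120 + 999) / 1000 = 9 ticks (rounded up) and ddr2ns(9) = 75 ns.
 */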
static inline unsigned int ns2ddr(struct dsi_data *dsi, unsigned int ns)
{
	/* convert time in ns to ddr ticks, rounding up */
	unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;

	return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
}

static inline unsigned int ddr2ns(struct dsi_data *dsi, unsigned int ddr)
{
	unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;

	return ddr * 1000 * 1000 / (ddr_clk / 1000);
}

static void dsi_cio_timings(struct dsi_data *dsi)
{
	u32 r;
	u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
	u32 tlpx_half, tclk_trail, tclk_zero;
	u32 tclk_prepare;

	/* calculate timings */

	/* 1 * DDR_CLK = 2 * UI */

	/* min 40ns + 4*UI	max 85ns + 6*UI */
	ths_prepare = ns2ddr(dsi, 70) + 2;

	/* min 145ns + 10*UI */
	ths_prepare_ths_zero = ns2ddr(dsi, 175) + 2;

	/* min max(8*UI, 60ns+4*UI) */
	ths_trail = ns2ddr(dsi, 60) + 5;

	/* min 100ns */
	ths_exit = ns2ddr(dsi, 145);

	/* tlpx min 50ns */
	tlpx_half = ns2ddr(dsi, 25);

	/* min 60ns */
	tclk_trail = ns2ddr(dsi, 60) + 2;

	/* min 38ns, max 95ns */
	tclk_prepare = ns2ddr(dsi, 65);

	/* min tclk-prepare + tclk-zero = 300ns */
	tclk_zero = ns2ddr(dsi, 260);

	DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
		ths_prepare, ddr2ns(dsi, ths_prepare),
		ths_prepare_ths_zero, ddr2ns(dsi, ths_prepare_ths_zero));
	DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
			ths_trail, ddr2ns(dsi, ths_trail),
			ths_exit, ddr2ns(dsi, ths_exit));

	DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), tclk_zero %u (%uns)\n",
			tlpx_half, ddr2ns(dsi, tlpx_half),
			tclk_trail, ddr2ns(dsi, tclk_trail),
			tclk_zero, ddr2ns(dsi, tclk_zero));
	DSSDBG("tclk_prepare %u (%uns)\n",
			tclk_prepare, ddr2ns(dsi, tclk_prepare));

	/* program timings */

	r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
	r = FLD_MOD(r, ths_prepare, 31, 24);
	r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
	r = FLD_MOD(r, ths_trail, 15, 8);
	r = FLD_MOD(r, ths_exit, 7, 0);
	dsi_write_reg(dsi, DSI_DSIPHY_CFG0, r);

	r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
	r = FLD_MOD(r, tlpx_half, 20, 16);
	r = FLD_MOD(r, tclk_trail, 15, 8);
	r = FLD_MOD(r, tclk_zero, 7, 0);

	if (dsi->data->quirks & DSI_QUIRK_PHY_DCC) {
		r = FLD_MOD(r, 0, 21, 21);	/* DCCEN = disable */
		r = FLD_MOD(r, 1, 22, 22);	/* CLKINP_DIVBY2EN = enable */
		r = FLD_MOD(r, 1, 23, 23);	/* CLKINP_SEL = enable */
	}

	dsi_write_reg(dsi, DSI_DSIPHY_CFG1, r);

	r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
	r = FLD_MOD(r, tclk_prepare, 7, 0);
	dsi_write_reg(dsi, DSI_DSIPHY_CFG2, r);
}

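/*
 * Wait until the TXCLKESC domains of all lanes in use have come out of
 * reset, by polling the per-lane reset-done bits in DSI_DSIPHY_CFG5. On
 * DSI_QUIRK_REVERSE_TXCLKESC hardware the bits are laid out in reverse
 * order, hence the two offset tables below.
 */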
static int dsi_cio_wait_tx_clk_esc_reset(struct dsi_data *dsi)
{
	int t, i;
	bool in_use[DSI_MAX_NR_LANES];
	static const u8 offsets_old[] = { 28, 27, 26 };
	static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
	const u8 *offsets;

	if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC)
		offsets = offsets_old;
	else
		offsets = offsets_new;

	for (i = 0; i < dsi->num_lanes_supported; ++i)
		in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED;

	t = 100000;
	while (true) {
		u32 l;
		int ok;

		l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5);

		ok = 0;
		for (i = 0; i < dsi->num_lanes_supported; ++i) {
			if (!in_use[i] || (l & (1 << offsets[i])))
				ok++;
		}

		if (ok == dsi->num_lanes_supported)
			break;

		if (--t == 0) {
			for (i = 0; i < dsi->num_lanes_supported; ++i) {
				if (!in_use[i] || (l & (1 << offsets[i])))
					continue;

				DSSERR("CIO TXCLKESC%d domain not coming out of reset\n",
					i);
			}
			return -EIO;
		}
	}

	return 0;
}

/* return bitmask of enabled lanes, lane0 being the lsb */
static unsigned int dsi_get_lane_mask(struct dsi_data *dsi)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < dsi->num_lanes_supported; ++i) {
		if (dsi->lanes[i].function != DSI_LANE_UNUSED)
			mask |= 1 << i;
	}

	return mask;
}

/* OMAP4 CONTROL_DSIPHY */
#define OMAP4_DSIPHY_SYSCON_OFFSET		0x78

#define OMAP4_DSI2_LANEENABLE_SHIFT		29
#define OMAP4_DSI2_LANEENABLE_MASK		(0x7 << 29)
#define OMAP4_DSI1_LANEENABLE_SHIFT		24
#define OMAP4_DSI1_LANEENABLE_MASK		(0x1f << 24)
#define OMAP4_DSI1_PIPD_SHIFT			19
#define OMAP4_DSI1_PIPD_MASK			(0x1f << 19)
#define OMAP4_DSI2_PIPD_SHIFT			14
#define OMAP4_DSI2_PIPD_MASK			(0x1f << 14)

static int dsi_omap4_mux_pads(struct dsi_data *dsi, unsigned int lanes)
{
	u32 enable_mask, enable_shift;
	u32 pipd_mask, pipd_shift;

	if (dsi->module_id == 0) {
		enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
		enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
		pipd_mask = OMAP4_DSI1_PIPD_MASK;
		pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
	} else if (dsi->module_id == 1) {
		enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
		enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
		pipd_mask = OMAP4_DSI2_PIPD_MASK;
		pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
	} else {
		return -ENODEV;
	}

	return regmap_update_bits(dsi->syscon, OMAP4_DSIPHY_SYSCON_OFFSET,
		enable_mask | pipd_mask,
		(lanes << enable_shift) | (lanes << pipd_shift));
}

/* OMAP5 CONTROL_DSIPHY */

#define OMAP5_DSIPHY_SYSCON_OFFSET	0x74

#define OMAP5_DSI1_LANEENABLE_SHIFT	24
#define OMAP5_DSI2_LANEENABLE_SHIFT	19
#define OMAP5_DSI_LANEENABLE_MASK	0x1f

static int dsi_omap5_mux_pads(struct dsi_data *dsi, unsigned int lanes)
{
	u32 enable_shift;

	if (dsi->module_id == 0)
		enable_shift = OMAP5_DSI1_LANEENABLE_SHIFT;
	else if (dsi->module_id == 1)
		enable_shift = OMAP5_DSI2_LANEENABLE_SHIFT;
	else
		return -ENODEV;

	return regmap_update_bits(dsi->syscon, OMAP5_DSIPHY_SYSCON_OFFSET,
		OMAP5_DSI_LANEENABLE_MASK << enable_shift,
		lanes << enable_shift);
}

static int dsi_enable_pads(struct dsi_data *dsi, unsigned int lane_mask)
{
	if (dsi->data->model == DSI_MODEL_OMAP4)
		return dsi_omap4_mux_pads(dsi, lane_mask);
	if (dsi->data->model == DSI_MODEL_OMAP5)
		return dsi_omap5_mux_pads(dsi, lane_mask);
	return 0;
}

static void dsi_disable_pads(struct dsi_data *dsi)
{
	if (dsi->data->model == DSI_MODEL_OMAP4)
		dsi_omap4_mux_pads(dsi, 0);
	else if (dsi->data->model == DSI_MODEL_OMAP5)
		dsi_omap5_mux_pads(dsi, 0);
}

static int dsi_cio_init(struct dsi_data *dsi)
{
	int r;
	u32 l;

	DSSDBG("DSI CIO init starts\n");

	r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsi));
	if (r)
		return r;

	dsi_enable_scp_clk(dsi);

	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
	dsi_read_reg(dsi, DSI_DSIPHY_CFG5);

	if (!wait_for_bit_change(dsi, DSI_DSIPHY_CFG5, 30, 1)) {
		DSSERR("CIO SCP Clock domain not coming out of reset.\n");
		r = -EIO;
		goto err_scp_clk_dom;
	}

	r = dsi_set_lane_config(dsi);
	if (r)
		goto err_scp_clk_dom;

	/* set TX STOP MODE timer to maximum for this operation */
	l = dsi_read_reg(dsi, DSI_TIMING1);
	l = FLD_MOD(l, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
	l = FLD_MOD(l, 1, 14, 14);	/* STOP_STATE_X16_IO */
	l = FLD_MOD(l, 1, 13, 13);	/* STOP_STATE_X4_IO */
	l = FLD_MOD(l, 0x1fff, 12, 0);	/* STOP_STATE_COUNTER_IO */
	dsi_write_reg(dsi, DSI_TIMING1, l);

	r = dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ON);
	if (r)
		goto err_cio_pwr;

	if (!wait_for_bit_change(dsi, DSI_COMPLEXIO_CFG1, 29, 1)) {
		DSSERR("CIO PWR clock domain not coming out of reset.\n");
		r = -ENODEV;
		goto err_cio_pwr_dom;
	}

	dsi_if_enable(dsi, true);
	dsi_if_enable(dsi, false);
	REG_FLD_MOD(dsi, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */

	r = dsi_cio_wait_tx_clk_esc_reset(dsi);
	if (r)
		goto err_tx_clk_esc_rst;

	/* FORCE_TX_STOP_MODE_IO */
	REG_FLD_MOD(dsi, DSI_TIMING1, 0, 15, 15);

	dsi_cio_timings(dsi);

	/* DDR_CLK_ALWAYS_ON */
	REG_FLD_MOD(dsi, DSI_CLK_CTRL,
		    !(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS),
		    13, 13);

	DSSDBG("CIO init done\n");

	return 0;

err_tx_clk_esc_rst:
	REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
err_cio_pwr_dom:
	dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
err_scp_clk_dom:
	dsi_disable_scp_clk(dsi);
	dsi_disable_pads(dsi);
	return r;
}

static void dsi_cio_uninit(struct dsi_data *dsi)
{
	/* DDR_CLK_ALWAYS_ON */
	REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);

	dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
	dsi_disable_scp_clk(dsi);
	dsi_disable_pads(dsi);
}

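/*
 * The TX and RX FIFO spaces are each made up of 4 units of 32 x 4 bytes,
 * shared between the four virtual channels: every VC gets a start unit
 * ("add") and a size in units, and the assignments must not exceed the 4
 * available units in total. dsi_vc_send_long() relies on this layout when
 * it checks a packet against tx_fifo_size * 32 * 4 bytes.
 */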
static void dsi_config_tx_fifo(struct dsi_data *dsi,
			       enum fifo_size size1, enum fifo_size size2,
			       enum fifo_size size3, enum fifo_size size4)
{
	u32 r = 0;
	int add = 0;
	int i;

	dsi->vc[0].tx_fifo_size = size1;
	dsi->vc[1].tx_fifo_size = size2;
	dsi->vc[2].tx_fifo_size = size3;
	dsi->vc[3].tx_fifo_size = size4;

	for (i = 0; i < 4; i++) {
		u8 v;
		int size = dsi->vc[i].tx_fifo_size;

		if (add + size > 4) {
			DSSERR("Illegal FIFO configuration\n");
			BUG();
			return;
		}

		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
		r |= v << (8 * i);
		/*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
		add += size;
	}

	dsi_write_reg(dsi, DSI_TX_FIFO_VC_SIZE, r);
}

static void dsi_config_rx_fifo(struct dsi_data *dsi,
			       enum fifo_size size1, enum fifo_size size2,
			       enum fifo_size size3, enum fifo_size size4)
{
	u32 r = 0;
	int add = 0;
	int i;

	dsi->vc[0].rx_fifo_size = size1;
	dsi->vc[1].rx_fifo_size = size2;
	dsi->vc[2].rx_fifo_size = size3;
	dsi->vc[3].rx_fifo_size = size4;

	for (i = 0; i < 4; i++) {
		u8 v;
		int size = dsi->vc[i].rx_fifo_size;

		if (add + size > 4) {
			DSSERR("Illegal FIFO configuration\n");
			BUG();
			return;
		}

		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
		r |= v << (8 * i);
		/*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
		add += size;
	}

	dsi_write_reg(dsi, DSI_RX_FIFO_VC_SIZE, r);
}

static int dsi_force_tx_stop_mode_io(struct dsi_data *dsi)
{
	u32 r;

	r = dsi_read_reg(dsi, DSI_TIMING1);
	r = FLD_MOD(r, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
	dsi_write_reg(dsi, DSI_TIMING1, r);

	if (!wait_for_bit_change(dsi, DSI_TIMING1, 15, 0)) {
		DSSERR("TX_STOP bit not going down\n");
		return -EIO;
	}

	return 0;
}

static bool dsi_vc_is_enabled(struct dsi_data *dsi, int vc)
{
	return REG_GET(dsi, DSI_VC_CTRL(vc), 0, 0);
}

static void dsi_packet_sent_handler_vp(void *data, u32 mask)
{
	struct dsi_packet_sent_handler_data *vp_data =
		(struct dsi_packet_sent_handler_data *) data;
	struct dsi_data *dsi = vp_data->dsi;
	const int vc = dsi->update_vc;
	u8 bit = dsi->te_enabled ? 30 : 31;

	if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit) == 0)
		complete(vp_data->completion);
}

static int dsi_sync_vc_vp(struct dsi_data *dsi, int vc)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	struct dsi_packet_sent_handler_data vp_data = {
		.dsi = dsi,
		.completion = &completion
	};
	int r = 0;
	u8 bit;

	bit = dsi->te_enabled ? 30 : 31;

	r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
		&vp_data, DSI_VC_IRQ_PACKET_SENT);
	if (r)
		goto err0;

	/* Wait for completion only if TE_EN/TE_START is still set */
	if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit)) {
		if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(10)) == 0) {
			DSSERR("Failed to complete previous frame transfer\n");
			r = -EIO;
			goto err1;
		}
	}

	dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
		&vp_data, DSI_VC_IRQ_PACKET_SENT);

	return 0;
err1:
	dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
		&vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
	return r;
}

static void dsi_packet_sent_handler_l4(void *data, u32 mask)
{
	struct dsi_packet_sent_handler_data *l4_data =
		(struct dsi_packet_sent_handler_data *) data;
	struct dsi_data *dsi = l4_data->dsi;
	const int vc = dsi->update_vc;

	if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5) == 0)
		complete(l4_data->completion);
}

static int dsi_sync_vc_l4(struct dsi_data *dsi, int vc)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	struct dsi_packet_sent_handler_data l4_data = {
		.dsi = dsi,
		.completion = &completion
	};
	int r = 0;

	r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
		&l4_data, DSI_VC_IRQ_PACKET_SENT);
	if (r)
		goto err0;

	/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
	if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5)) {
		if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(10)) == 0) {
			DSSERR("Failed to complete previous l4 transfer\n");
			r = -EIO;
			goto err1;
		}
	}

	dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
		&l4_data, DSI_VC_IRQ_PACKET_SENT);

	return 0;
err1:
	dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
		&l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
	return r;
}

static int dsi_sync_vc(struct dsi_data *dsi, int vc)
{
	WARN_ON(!dsi_bus_is_locked(dsi));

	WARN_ON(in_interrupt());

	if (!dsi_vc_is_enabled(dsi, vc))
		return 0;

	switch (dsi->vc[vc].source) {
	case DSI_VC_SOURCE_VP:
		return dsi_sync_vc_vp(dsi, vc);
	case DSI_VC_SOURCE_L4:
		return dsi_sync_vc_l4(dsi, vc);
	default:
		BUG();
		return -EINVAL;
	}
}

static int dsi_vc_enable(struct dsi_data *dsi, int vc, bool enable)
{
	DSSDBG("dsi_vc_enable vc %d, enable %d\n",
			vc, enable);

	enable = enable ? 1 : 0;

	REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 0, 0);

	if (!wait_for_bit_change(dsi, DSI_VC_CTRL(vc), 0, enable)) {
		DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
		return -EIO;
	}

	return 0;
}

static void dsi_vc_initial_config(struct dsi_data *dsi, int vc)
{
	u32 r;

	DSSDBG("Initial config of VC %d\n", vc);

	r = dsi_read_reg(dsi, DSI_VC_CTRL(vc));

	if (FLD_GET(r, 15, 15)) /* VC_BUSY */
		DSSERR("VC(%d) busy when trying to configure it!\n",
				vc);

	r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
	r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
	r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
	r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
	r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
	r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
	r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
	if (dsi->data->quirks & DSI_QUIRK_VC_OCP_WIDTH)
		r = FLD_MOD(r, 3, 11, 10);	/* OCP_WIDTH = 32 bit */

	r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
	r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */

	dsi_write_reg(dsi, DSI_VC_CTRL(vc), r);

	dsi->vc[vc].source = DSI_VC_SOURCE_L4;
}

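/*
 * Switch a VC between LP and HS transmission (MODE_SPEED, bit 9). The
 * driver disables both the VC and the DSI interface before flipping the
 * bit, presumably because the mode may not be changed while the channel
 * is active, then re-enables both and forces TX stop mode.
 */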
static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int vc,
		bool enable)
{
	struct dsi_data *dsi = to_dsi_data(dssdev);

	DSSDBG("dsi_vc_enable_hs(%d, %d)\n", vc, enable);

	if (REG_GET(dsi, DSI_VC_CTRL(vc), 9, 9) == enable)
		return;

	WARN_ON(!dsi_bus_is_locked(dsi));

	dsi_vc_enable(dsi, vc, 0);
	dsi_if_enable(dsi, 0);

	REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 9, 9);

	dsi_vc_enable(dsi, vc, 1);
	dsi_if_enable(dsi, 1);

	dsi_force_tx_stop_mode_io(dsi);
}

static void dsi_vc_flush_long_data(struct dsi_data *dsi, int vc)
{
	while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
		u32 val;
		val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
		DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
				(val >> 0) & 0xff,
				(val >> 8) & 0xff,
				(val >> 16) & 0xff,
				(val >> 24) & 0xff);
	}
}

static void dsi_show_rx_ack_with_err(u16 err)
{
	DSSERR("\tACK with ERROR (%#x):\n", err);
	if (err & (1 << 0))
		DSSERR("\t\tSoT Error\n");
	if (err & (1 << 1))
		DSSERR("\t\tSoT Sync Error\n");
	if (err & (1 << 2))
		DSSERR("\t\tEoT Sync Error\n");
	if (err & (1 << 3))
		DSSERR("\t\tEscape Mode Entry Command Error\n");
	if (err & (1 << 4))
		DSSERR("\t\tLP Transmit Sync Error\n");
	if (err & (1 << 5))
		DSSERR("\t\tHS Receive Timeout Error\n");
	if (err & (1 << 6))
		DSSERR("\t\tFalse Control Error\n");
	if (err & (1 << 7))
		DSSERR("\t\t(reserved7)\n");
	if (err & (1 << 8))
		DSSERR("\t\tECC Error, single-bit (corrected)\n");
	if (err & (1 << 9))
		DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
	if (err & (1 << 10))
		DSSERR("\t\tChecksum Error\n");
	if (err & (1 << 11))
		DSSERR("\t\tData type not recognized\n");
	if (err & (1 << 12))
		DSSERR("\t\tInvalid VC ID\n");
	if (err & (1 << 13))
		DSSERR("\t\tInvalid Transmission Length\n");
	if (err & (1 << 14))
		DSSERR("\t\t(reserved14)\n");
	if (err & (1 << 15))
		DSSERR("\t\tDSI Protocol Violation\n");
}

static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int vc)
{
	/* RX_FIFO_NOT_EMPTY */
	while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
		u32 val;
		u8 dt;
		val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
		DSSERR("\trawval %#08x\n", val);
		dt = FLD_GET(val, 5, 0);
		if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
			u16 err = FLD_GET(val, 23, 8);
			dsi_show_rx_ack_with_err(err);
		} else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
			DSSERR("\tDCS short response, 1 byte: %#x\n",
					FLD_GET(val, 23, 8));
		} else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
			DSSERR("\tDCS short response, 2 byte: %#x\n",
					FLD_GET(val, 23, 8));
		} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
			DSSERR("\tDCS long response, len %d\n",
					FLD_GET(val, 23, 8));
			dsi_vc_flush_long_data(dsi, vc);
		} else {
			DSSERR("\tunknown datatype 0x%02x\n", dt);
		}
	}
	return 0;
}

static int dsi_vc_send_bta(struct dsi_data *dsi, int vc)
{
	if (dsi->debug_write || dsi->debug_read)
		DSSDBG("dsi_vc_send_bta %d\n", vc);

	WARN_ON(!dsi_bus_is_locked(dsi));

	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
		DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
		dsi_vc_flush_receive_data(dsi, vc);
	}

	REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 6, 6); /* BTA_EN */

	/* flush posted write */
	dsi_read_reg(dsi, DSI_VC_CTRL(vc));

	return 0;
}

static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int vc)
{
	struct dsi_data *dsi = to_dsi_data(dssdev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int r = 0;
	u32 err;

	r = dsi_register_isr_vc(dsi, vc, dsi_completion_handler,
			&completion, DSI_VC_IRQ_BTA);
	if (r)
		goto err0;

	r = dsi_register_isr(dsi, dsi_completion_handler, &completion,
			DSI_IRQ_ERROR_MASK);
	if (r)
		goto err1;

	r = dsi_vc_send_bta(dsi, vc);
	if (r)
		goto err2;

	if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(500)) == 0) {
		DSSERR("Failed to receive BTA\n");
		r = -EIO;
		goto err2;
	}

	err = dsi_get_errors(dsi);
	if (err) {
		DSSERR("Error while sending BTA: %x\n", err);
		r = -EIO;
		goto err2;
	}
err2:
	dsi_unregister_isr(dsi, dsi_completion_handler, &completion,
			DSI_IRQ_ERROR_MASK);
err1:
	dsi_unregister_isr_vc(dsi, vc, dsi_completion_handler,
			&completion, DSI_VC_IRQ_BTA);
err0:
	return r;
}

static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int vc,
					    int channel, u8 data_type, u16 len,
					    u8 ecc)
{
	u32 val;
	u8 data_id;

	WARN_ON(!dsi_bus_is_locked(dsi));

	data_id = data_type | channel << 6;

	val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
		FLD_VAL(ecc, 31, 24);

	dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(vc), val);
}

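/*
 * Long packet header layout, as assembled above: bits 7:0 carry the data
 * ID (data type | virtual channel << 6), bits 23:8 the payload length and
 * bits 31:24 the ECC byte. For example (values illustrative only), a DCS
 * long write (type 0x39) on channel 0 with a 4-byte payload yields the
 * header word 0x00000439.
 */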
static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int vc,
					     u8 b1, u8 b2, u8 b3, u8 b4)
{
	u32 val;

	val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;

	/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
			b1, b2, b3, b4, val); */

	dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(vc), val);
}

static int dsi_vc_send_long(struct dsi_data *dsi, int vc,
			    const struct mipi_dsi_msg *msg)
{
	/*u32 val; */
	int i;
	const u8 *p;
	int r = 0;
	u8 b1, b2, b3, b4;

	if (dsi->debug_write)
		DSSDBG("dsi_vc_send_long, %zu bytes\n", msg->tx_len);

	/* len + header */
	if (dsi->vc[vc].tx_fifo_size * 32 * 4 < msg->tx_len + 4) {
		DSSERR("unable to send long packet: packet too long.\n");
		return -EINVAL;
	}

	dsi_vc_write_long_header(dsi, vc, msg->channel, msg->type, msg->tx_len, 0);

	p = msg->tx_buf;
	for (i = 0; i < msg->tx_len >> 2; i++) {
		if (dsi->debug_write)
			DSSDBG("\tsending full packet %d\n", i);

		b1 = *p++;
		b2 = *p++;
		b3 = *p++;
		b4 = *p++;

		dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, b4);
	}

	i = msg->tx_len % 4;
	if (i) {
		b1 = 0; b2 = 0; b3 = 0;

		if (dsi->debug_write)
			DSSDBG("\tsending remainder bytes %d\n", i);

		switch (i) {
		case 3:
			b1 = *p++;
			b2 = *p++;
			b3 = *p++;
			break;
		case 2:
			b1 = *p++;
			b2 = *p++;
			break;
		case 1:
			b1 = *p++;
			break;
		}

		dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, 0);
	}

	return r;
}

static int dsi_vc_send_short(struct dsi_data *dsi, int vc,
			     const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet pkt;
	int ret;
	u32 r;

	ret = mipi_dsi_create_packet(&pkt, msg);
	if (ret < 0)
		return ret;

	WARN_ON(!dsi_bus_is_locked(dsi));

	if (dsi->debug_write)
		DSSDBG("dsi_vc_send_short(vc%d, dt %#x, b1 %#x, b2 %#x)\n",
		       vc, msg->type, pkt.header[1], pkt.header[2]);

	if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(vc)), 16, 16)) {
		DSSERR("ERROR FIFO FULL, aborting transfer\n");
		return -EINVAL;
	}

	r = pkt.header[3] << 24 | pkt.header[2] << 16 | pkt.header[1] << 8 |
	    pkt.header[0];

	dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc), r);

	return 0;
}

static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel)
{
	const struct mipi_dsi_msg msg = {
		.channel = channel,
		.type = MIPI_DSI_NULL_PACKET,
	};

	return dsi_vc_send_long(dsi, vc, &msg);
}

static int dsi_vc_write_common(struct omap_dss_device *dssdev, int vc,
			       const struct mipi_dsi_msg *msg)
{
	struct dsi_data *dsi = to_dsi_data(dssdev);
	int r;

	if (mipi_dsi_packet_format_is_short(msg->type))
		r = dsi_vc_send_short(dsi, vc, msg);
	else
		r = dsi_vc_send_long(dsi, vc, msg);

	if (r < 0)
		return r;

	/*
	 * TODO: we do not always have to do the BTA sync, for example
	 * we can improve performance by setting the update window
	 * information without sending BTA sync between the commands.
	 * In that case we can return early.
	 */

	r = dsi_vc_send_bta_sync(dssdev, vc);
	if (r) {
		DSSERR("bta sync failed\n");
		return r;
	}

	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
		DSSERR("rx fifo not empty after write, dumping data:\n");
		dsi_vc_flush_receive_data(dsi, vc);
		return -EIO;
	}

	return 0;
}

2213 */ 2214 2215 r = dsi_vc_send_bta_sync(dssdev, vc); 2216 if (r) { 2217 DSSERR("bta sync failed\n"); 2218 return r; 2219 } 2220 2221 /* RX_FIFO_NOT_EMPTY */ 2222 if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) { 2223 DSSERR("rx fifo not empty after write, dumping data:\n"); 2224 dsi_vc_flush_receive_data(dsi, vc); 2225 return -EIO; 2226 } 2227 2228 return 0; 2229 } 2230 2231 static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int vc, u8 *buf, 2232 int buflen, enum dss_dsi_content_type type) 2233 { 2234 u32 val; 2235 u8 dt; 2236 int r; 2237 2238 /* RX_FIFO_NOT_EMPTY */ 2239 if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20) == 0) { 2240 DSSERR("RX fifo empty when trying to read.\n"); 2241 r = -EIO; 2242 goto err; 2243 } 2244 2245 val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc)); 2246 if (dsi->debug_read) 2247 DSSDBG("\theader: %08x\n", val); 2248 dt = FLD_GET(val, 5, 0); 2249 if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) { 2250 u16 err = FLD_GET(val, 23, 8); 2251 dsi_show_rx_ack_with_err(err); 2252 r = -EIO; 2253 goto err; 2254 2255 } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ? 2256 MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE : 2257 MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE)) { 2258 u8 data = FLD_GET(val, 15, 8); 2259 if (dsi->debug_read) 2260 DSSDBG("\t%s short response, 1 byte: %02x\n", 2261 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : 2262 "DCS", data); 2263 2264 if (buflen < 1) { 2265 r = -EIO; 2266 goto err; 2267 } 2268 2269 buf[0] = data; 2270 2271 return 1; 2272 } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ? 2273 MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE : 2274 MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE)) { 2275 u16 data = FLD_GET(val, 23, 8); 2276 if (dsi->debug_read) 2277 DSSDBG("\t%s short response, 2 byte: %04x\n", 2278 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : 2279 "DCS", data); 2280 2281 if (buflen < 2) { 2282 r = -EIO; 2283 goto err; 2284 } 2285 2286 buf[0] = data & 0xff; 2287 buf[1] = (data >> 8) & 0xff; 2288 2289 return 2; 2290 } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ? 2291 MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE : 2292 MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)) { 2293 int w; 2294 int len = FLD_GET(val, 23, 8); 2295 if (dsi->debug_read) 2296 DSSDBG("\t%s long response, len %d\n", 2297 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : 2298 "DCS", len); 2299 2300 if (len > buflen) { 2301 r = -EIO; 2302 goto err; 2303 } 2304 2305 /* two byte checksum ends the packet, not included in len */ 2306 for (w = 0; w < len + 2;) { 2307 int b; 2308 val = dsi_read_reg(dsi, 2309 DSI_VC_SHORT_PACKET_HEADER(vc)); 2310 if (dsi->debug_read) 2311 DSSDBG("\t\t%02x %02x %02x %02x\n", 2312 (val >> 0) & 0xff, 2313 (val >> 8) & 0xff, 2314 (val >> 16) & 0xff, 2315 (val >> 24) & 0xff); 2316 2317 for (b = 0; b < 4; ++b) { 2318 if (w < len) 2319 buf[w] = (val >> (b * 8)) & 0xff; 2320 /* we discard the 2 byte checksum */ 2321 ++w; 2322 } 2323 } 2324 2325 return len; 2326 } else { 2327 DSSERR("\tunknown datatype 0x%02x\n", dt); 2328 r = -EIO; 2329 goto err; 2330 } 2331 2332 err: 2333 DSSERR("dsi_vc_read_rx_fifo(vc %d type %s) failed\n", vc, 2334 type == DSS_DSI_CONTENT_GENERIC ? 
"GENERIC" : "DCS"); 2335 2336 return r; 2337 } 2338 2339 static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int vc, 2340 const struct mipi_dsi_msg *msg) 2341 { 2342 struct dsi_data *dsi = to_dsi_data(dssdev); 2343 u8 cmd = ((u8 *)msg->tx_buf)[0]; 2344 int r; 2345 2346 if (dsi->debug_read) 2347 DSSDBG("%s(vc %d, cmd %x)\n", __func__, vc, cmd); 2348 2349 r = dsi_vc_send_short(dsi, vc, msg); 2350 if (r) 2351 goto err; 2352 2353 r = dsi_vc_send_bta_sync(dssdev, vc); 2354 if (r) 2355 goto err; 2356 2357 r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len, 2358 DSS_DSI_CONTENT_DCS); 2359 if (r < 0) 2360 goto err; 2361 2362 if (r != msg->rx_len) { 2363 r = -EIO; 2364 goto err; 2365 } 2366 2367 return 0; 2368 err: 2369 DSSERR("%s(vc %d, cmd 0x%02x) failed\n", __func__, vc, cmd); 2370 return r; 2371 } 2372 2373 static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int vc, 2374 const struct mipi_dsi_msg *msg) 2375 { 2376 struct dsi_data *dsi = to_dsi_data(dssdev); 2377 int r; 2378 2379 r = dsi_vc_send_short(dsi, vc, msg); 2380 if (r) 2381 goto err; 2382 2383 r = dsi_vc_send_bta_sync(dssdev, vc); 2384 if (r) 2385 goto err; 2386 2387 r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len, 2388 DSS_DSI_CONTENT_GENERIC); 2389 if (r < 0) 2390 goto err; 2391 2392 if (r != msg->rx_len) { 2393 r = -EIO; 2394 goto err; 2395 } 2396 2397 return 0; 2398 err: 2399 DSSERR("%s(vc %d, reqlen %zu) failed\n", __func__, vc, msg->tx_len); 2400 return r; 2401 } 2402 2403 static void dsi_set_lp_rx_timeout(struct dsi_data *dsi, unsigned int ticks, 2404 bool x4, bool x16) 2405 { 2406 unsigned long fck; 2407 unsigned long total_ticks; 2408 u32 r; 2409 2410 BUG_ON(ticks > 0x1fff); 2411 2412 /* ticks in DSI_FCK */ 2413 fck = dsi_fclk_rate(dsi); 2414 2415 r = dsi_read_reg(dsi, DSI_TIMING2); 2416 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */ 2417 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */ 2418 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */ 2419 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */ 2420 dsi_write_reg(dsi, DSI_TIMING2, r); 2421 2422 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); 2423 2424 DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n", 2425 total_ticks, 2426 ticks, x4 ? " x4" : "", x16 ? " x16" : "", 2427 (total_ticks * 1000) / (fck / 1000 / 1000)); 2428 } 2429 2430 static void dsi_set_ta_timeout(struct dsi_data *dsi, unsigned int ticks, 2431 bool x8, bool x16) 2432 { 2433 unsigned long fck; 2434 unsigned long total_ticks; 2435 u32 r; 2436 2437 BUG_ON(ticks > 0x1fff); 2438 2439 /* ticks in DSI_FCK */ 2440 fck = dsi_fclk_rate(dsi); 2441 2442 r = dsi_read_reg(dsi, DSI_TIMING1); 2443 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */ 2444 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */ 2445 r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */ 2446 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */ 2447 dsi_write_reg(dsi, DSI_TIMING1, r); 2448 2449 total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1); 2450 2451 DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n", 2452 total_ticks, 2453 ticks, x8 ? " x8" : "", x16 ? 
" x16" : "", 2454 (total_ticks * 1000) / (fck / 1000 / 1000)); 2455 } 2456 2457 static void dsi_set_stop_state_counter(struct dsi_data *dsi, unsigned int ticks, 2458 bool x4, bool x16) 2459 { 2460 unsigned long fck; 2461 unsigned long total_ticks; 2462 u32 r; 2463 2464 BUG_ON(ticks > 0x1fff); 2465 2466 /* ticks in DSI_FCK */ 2467 fck = dsi_fclk_rate(dsi); 2468 2469 r = dsi_read_reg(dsi, DSI_TIMING1); 2470 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ 2471 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */ 2472 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */ 2473 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */ 2474 dsi_write_reg(dsi, DSI_TIMING1, r); 2475 2476 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); 2477 2478 DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n", 2479 total_ticks, 2480 ticks, x4 ? " x4" : "", x16 ? " x16" : "", 2481 (total_ticks * 1000) / (fck / 1000 / 1000)); 2482 } 2483 2484 static void dsi_set_hs_tx_timeout(struct dsi_data *dsi, unsigned int ticks, 2485 bool x4, bool x16) 2486 { 2487 unsigned long fck; 2488 unsigned long total_ticks; 2489 u32 r; 2490 2491 BUG_ON(ticks > 0x1fff); 2492 2493 /* ticks in TxByteClkHS */ 2494 fck = dsi_get_txbyteclkhs(dsi); 2495 2496 r = dsi_read_reg(dsi, DSI_TIMING2); 2497 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */ 2498 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */ 2499 r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */ 2500 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */ 2501 dsi_write_reg(dsi, DSI_TIMING2, r); 2502 2503 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); 2504 2505 DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n", 2506 total_ticks, 2507 ticks, x4 ? " x4" : "", x16 ? " x16" : "", 2508 (total_ticks * 1000) / (fck / 1000 / 1000)); 2509 } 2510 2511 static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi) 2512 { 2513 int num_line_buffers; 2514 2515 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { 2516 int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt); 2517 const struct videomode *vm = &dsi->vm; 2518 /* 2519 * Don't use line buffers if width is greater than the video 2520 * port's line buffer size 2521 */ 2522 if (dsi->line_buffer_size <= vm->hactive * bpp / 8) 2523 num_line_buffers = 0; 2524 else 2525 num_line_buffers = 2; 2526 } else { 2527 /* Use maximum number of line buffers in command mode */ 2528 num_line_buffers = 2; 2529 } 2530 2531 /* LINE_BUFFER */ 2532 REG_FLD_MOD(dsi, DSI_CTRL, num_line_buffers, 13, 12); 2533 } 2534 2535 static void dsi_config_vp_sync_events(struct dsi_data *dsi) 2536 { 2537 bool sync_end; 2538 u32 r; 2539 2540 if (dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE) 2541 sync_end = true; 2542 else 2543 sync_end = false; 2544 2545 r = dsi_read_reg(dsi, DSI_CTRL); 2546 r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */ 2547 r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */ 2548 r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */ 2549 r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */ 2550 r = FLD_MOD(r, sync_end, 16, 16); /* VP_VSYNC_END */ 2551 r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */ 2552 r = FLD_MOD(r, sync_end, 18, 18); /* VP_HSYNC_END */ 2553 dsi_write_reg(dsi, DSI_CTRL, r); 2554 } 2555 2556 static void dsi_config_blanking_modes(struct dsi_data *dsi) 2557 { 2558 int blanking_mode = dsi->vm_timings.blanking_mode; 2559 int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode; 2560 int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode; 2561 int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode; 2562 u32 r; 2563 
static void dsi_config_blanking_modes(struct dsi_data *dsi)
{
	int blanking_mode = dsi->vm_timings.blanking_mode;
	int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
	int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
	int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode;
	u32 r;

	/*
	 * 0 = TX FIFO packets sent or LPS in corresponding blanking periods
	 * 1 = Long blanking packets are sent in corresponding blanking periods
	 */
	r = dsi_read_reg(dsi, DSI_CTRL);
	r = FLD_MOD(r, blanking_mode, 20, 20);     /* BLANKING_MODE */
	r = FLD_MOD(r, hfp_blanking_mode, 21, 21); /* HFP_BLANKING */
	r = FLD_MOD(r, hbp_blanking_mode, 22, 22); /* HBP_BLANKING */
	r = FLD_MOD(r, hsa_blanking_mode, 23, 23); /* HSA_BLANKING */
	dsi_write_reg(dsi, DSI_CTRL, r);
}

/*
 * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
 * results in maximum transition time for data and clock lanes to enter and
 * exit HS mode. Hence, this is the scenario where the least amount of command
 * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
 * clock cycles that can be used to interleave command mode data in HS so that
 * all scenarios are satisfied.
 */
static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
				     int exit_hs, int exiths_clk, int ddr_pre,
				     int ddr_post)
{
	int transition;

	/*
	 * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
	 * time of data lanes only, if it isn't set, we need to consider HS
	 * transition time of both data and clock lanes. HS transition time
	 * of Scenario 3 is considered.
	 */
	if (ddr_alwon) {
		transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
	} else {
		int trans1, trans2;

		trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
		trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
			 enter_hs + 1;
		transition = max(trans1, trans2);
	}

	return blank > transition ? blank - transition : 0;
}

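/*
 * Illustrative example for dsi_compute_interleave_hs() above, with made-up
 * numbers: DDR_CLK_ALWAYS_ON off, ddr_pre = 8, ddr_post = 10, enter_hs = 14,
 * exit_hs = 19, exiths_clk = 21. Then trans1 = 8 + 14 + 19 + 14 + 1 = 56 and
 * trans2 = 8 + 14 + 21 + 10 + 8 + 14 + 1 = 76, so a blanking period of 100
 * TXBYTECLKHS cycles leaves 100 - 76 = 24 cycles for interleaved commands.
 */
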
/*
 * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
 * results in maximum transition time for data lanes to enter and exit LP mode.
 * Hence, this is the scenario where the least amount of command mode data can
 * be interleaved. We program the minimum amount of bytes that can be
 * interleaved in LP so that all scenarios are satisfied.
 */
static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
				     int lp_clk_div, int tdsi_fclk)
{
	int trans_lp;	/* time required for a LP transition, in TXBYTECLKHS */
	int tlp_avail;	/* time left for interleaving commands, in CLKIN4DDR */
	int ttxclkesc;	/* period of LP transmit escape clock, in CLKIN4DDR */
	int thsbyte_clk = 16;	/* Period of TXBYTECLKHS clock, in CLKIN4DDR */
	int lp_inter;	/* cmd mode data that can be interleaved, in bytes */

	/* maximum LP transition time according to Scenario 1 */
	trans_lp = exit_hs + max(enter_hs, 2) + 1;

	/* CLKIN4DDR = 16 * TXBYTECLKHS */
	tlp_avail = thsbyte_clk * (blank - trans_lp);

	ttxclkesc = tdsi_fclk * lp_clk_div;

	lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
		    26) / 16;

	return max(lp_inter, 0);
}

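/*
 * Unit sketch for dsi_compute_interleave_lp() above: everything is first
 * converted to CLKIN4DDR cycles, where one TXBYTECLKHS period is 16 cycles
 * (thsbyte_clk) and one TXCLKESC period is tdsi_fclk * lp_clk_div cycles.
 * The remaining constants (8 * thsbyte_clk, 5 * tdsi_fclk, the -26 and the
 * final division by 16) are fixed overheads from the TRM formula for LP
 * escape-mode transmission; their individual breakdown is not spelled out
 * here.
 */
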
static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
{
	int blanking_mode;
	int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
	int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
	int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
	int tclk_trail, ths_exit, exiths_clk;
	bool ddr_alwon;
	const struct videomode *vm = &dsi->vm;
	int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
	int ndl = dsi->num_lanes_used - 1;
	int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
	int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
	int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
	int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
	int bl_interleave_hs = 0, bl_interleave_lp = 0;
	u32 r;

	r = dsi_read_reg(dsi, DSI_CTRL);
	blanking_mode = FLD_GET(r, 20, 20);
	hfp_blanking_mode = FLD_GET(r, 21, 21);
	hbp_blanking_mode = FLD_GET(r, 22, 22);
	hsa_blanking_mode = FLD_GET(r, 23, 23);

	r = dsi_read_reg(dsi, DSI_VM_TIMING1);
	hbp = FLD_GET(r, 11, 0);
	hfp = FLD_GET(r, 23, 12);
	hsa = FLD_GET(r, 31, 24);

	r = dsi_read_reg(dsi, DSI_CLK_TIMING);
	ddr_clk_post = FLD_GET(r, 7, 0);
	ddr_clk_pre = FLD_GET(r, 15, 8);

	r = dsi_read_reg(dsi, DSI_VM_TIMING7);
	exit_hs_mode_lat = FLD_GET(r, 15, 0);
	enter_hs_mode_lat = FLD_GET(r, 31, 16);

	r = dsi_read_reg(dsi, DSI_CLK_CTRL);
	lp_clk_div = FLD_GET(r, 12, 0);
	ddr_alwon = FLD_GET(r, 13, 13);

	r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
	ths_exit = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
	tclk_trail = FLD_GET(r, 15, 8);

	exiths_clk = ths_exit + tclk_trail;

	width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
	bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);

	if (!hsa_blanking_mode) {
		hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);
		hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!hfp_blanking_mode) {
		hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);
		hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!hbp_blanking_mode) {
		hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);
		hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!blanking_mode) {
		bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);
		bl_interleave_lp = dsi_compute_interleave_lp(bllp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
	       hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
	       bl_interleave_hs);

	DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
	       hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
	       bl_interleave_lp);

	r = dsi_read_reg(dsi, DSI_VM_TIMING4);
	r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
	r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
	r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
	dsi_write_reg(dsi, DSI_VM_TIMING4, r);

	r = dsi_read_reg(dsi, DSI_VM_TIMING5);
	r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
	r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
	r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
	dsi_write_reg(dsi, DSI_VM_TIMING5, r);

	r = dsi_read_reg(dsi, DSI_VM_TIMING6);
	r = FLD_MOD(r, bl_interleave_hs, 31, 15);
	r = FLD_MOD(r, bl_interleave_lp, 16, 0);
	dsi_write_reg(dsi, DSI_VM_TIMING6, r);
}

static int dsi_proto_config(struct dsi_data *dsi)
{
	u32 r;
	int buswidth = 0;

	dsi_config_tx_fifo(dsi, DSI_FIFO_SIZE_32,
			   DSI_FIFO_SIZE_32,
			   DSI_FIFO_SIZE_32,
			   DSI_FIFO_SIZE_32);

	dsi_config_rx_fifo(dsi, DSI_FIFO_SIZE_32,
			   DSI_FIFO_SIZE_32,
			   DSI_FIFO_SIZE_32,
			   DSI_FIFO_SIZE_32);

	/* XXX what values for the timeouts? */
	dsi_set_stop_state_counter(dsi, 0x1000, false, false);
	dsi_set_ta_timeout(dsi, 0x1fff, true, true);
	dsi_set_lp_rx_timeout(dsi, 0x1fff, true, true);
	dsi_set_hs_tx_timeout(dsi, 0x1fff, true, true);

	switch (mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt)) {
	case 16:
		buswidth = 0;
		break;
	case 18:
		buswidth = 1;
		break;
	case 24:
		buswidth = 2;
		break;
	default:
		BUG();
		return -EINVAL;
	}

	r = dsi_read_reg(dsi, DSI_CTRL);
	r = FLD_MOD(r, 1, 1, 1);        /* CS_RX_EN */
	r = FLD_MOD(r, 1, 2, 2);        /* ECC_RX_EN */
	r = FLD_MOD(r, 1, 3, 3);        /* TX_FIFO_ARBITRATION */
	r = FLD_MOD(r, 1, 4, 4);        /* VP_CLK_RATIO, always 1, see errata */
	r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
	r = FLD_MOD(r, 0, 8, 8);        /* VP_CLK_POL */
	r = FLD_MOD(r, 1, 14, 14);      /* TRIGGER_RESET_MODE */
	r = FLD_MOD(r, 1, 19, 19);      /* EOT_ENABLE */
	if (!(dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC)) {
		r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
		/* DCS_CMD_CODE, 1=start, 0=continue */
		r = FLD_MOD(r, 0, 25, 25);
	}

	dsi_write_reg(dsi, DSI_CTRL, r);

	dsi_config_vp_num_line_buffers(dsi);

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		dsi_config_vp_sync_events(dsi);
		dsi_config_blanking_modes(dsi);
		dsi_config_cmd_mode_interleaving(dsi);
	}

	dsi_vc_initial_config(dsi, 0);
	dsi_vc_initial_config(dsi, 1);
	dsi_vc_initial_config(dsi, 2);
	dsi_vc_initial_config(dsi, 3);

	return 0;
}

DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n", 2883 enter_hs_mode_lat, exit_hs_mode_lat); 2884 2885 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { 2886 /* TODO: Implement a video mode check_timings function */ 2887 int hsa = dsi->vm_timings.hsa; 2888 int hfp = dsi->vm_timings.hfp; 2889 int hbp = dsi->vm_timings.hbp; 2890 int vsa = dsi->vm_timings.vsa; 2891 int vfp = dsi->vm_timings.vfp; 2892 int vbp = dsi->vm_timings.vbp; 2893 int window_sync = dsi->vm_timings.window_sync; 2894 bool hsync_end; 2895 const struct videomode *vm = &dsi->vm; 2896 int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt); 2897 int tl, t_he, width_bytes; 2898 2899 hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE; 2900 t_he = hsync_end ? 2901 ((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0; 2902 2903 width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8); 2904 2905 /* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */ 2906 tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp + 2907 DIV_ROUND_UP(width_bytes + 6, ndl) + hbp; 2908 2909 DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp, 2910 hfp, hsync_end ? hsa : 0, tl); 2911 DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp, 2912 vsa, vm->vactive); 2913 2914 r = dsi_read_reg(dsi, DSI_VM_TIMING1); 2915 r = FLD_MOD(r, hbp, 11, 0); /* HBP */ 2916 r = FLD_MOD(r, hfp, 23, 12); /* HFP */ 2917 r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24); /* HSA */ 2918 dsi_write_reg(dsi, DSI_VM_TIMING1, r); 2919 2920 r = dsi_read_reg(dsi, DSI_VM_TIMING2); 2921 r = FLD_MOD(r, vbp, 7, 0); /* VBP */ 2922 r = FLD_MOD(r, vfp, 15, 8); /* VFP */ 2923 r = FLD_MOD(r, vsa, 23, 16); /* VSA */ 2924 r = FLD_MOD(r, window_sync, 27, 24); /* WINDOW_SYNC */ 2925 dsi_write_reg(dsi, DSI_VM_TIMING2, r); 2926 2927 r = dsi_read_reg(dsi, DSI_VM_TIMING3); 2928 r = FLD_MOD(r, vm->vactive, 14, 0); /* VACT */ 2929 r = FLD_MOD(r, tl, 31, 16); /* TL */ 2930 dsi_write_reg(dsi, DSI_VM_TIMING3, r); 2931 } 2932 } 2933 2934 static int dsi_configure_pins(struct dsi_data *dsi, 2935 int num_pins, const u32 *pins) 2936 { 2937 struct dsi_lane_config lanes[DSI_MAX_NR_LANES]; 2938 int num_lanes; 2939 int i; 2940 2941 static const enum dsi_lane_function functions[] = { 2942 DSI_LANE_CLK, 2943 DSI_LANE_DATA1, 2944 DSI_LANE_DATA2, 2945 DSI_LANE_DATA3, 2946 DSI_LANE_DATA4, 2947 }; 2948 2949 if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2 2950 || num_pins % 2 != 0) 2951 return -EINVAL; 2952 2953 for (i = 0; i < DSI_MAX_NR_LANES; ++i) 2954 lanes[i].function = DSI_LANE_UNUSED; 2955 2956 num_lanes = 0; 2957 2958 for (i = 0; i < num_pins; i += 2) { 2959 u8 lane, pol; 2960 u32 dx, dy; 2961 2962 dx = pins[i]; 2963 dy = pins[i + 1]; 2964 2965 if (dx >= dsi->num_lanes_supported * 2) 2966 return -EINVAL; 2967 2968 if (dy >= dsi->num_lanes_supported * 2) 2969 return -EINVAL; 2970 2971 if (dx & 1) { 2972 if (dy != dx - 1) 2973 return -EINVAL; 2974 pol = 1; 2975 } else { 2976 if (dy != dx + 1) 2977 return -EINVAL; 2978 pol = 0; 2979 } 2980 2981 lane = dx / 2; 2982 2983 lanes[lane].function = functions[i / 2]; 2984 lanes[lane].polarity = pol; 2985 num_lanes++; 2986 } 2987 2988 memcpy(dsi->lanes, lanes, sizeof(dsi->lanes)); 2989 dsi->num_lanes_used = num_lanes; 2990 2991 return 0; 2992 } 2993 2994 static int dsi_enable_video_mode(struct dsi_data *dsi, int vc) 2995 { 2996 int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt); 2997 u8 data_type; 2998 u16 word_count; 2999 3000 switch (dsi->pix_fmt) { 3001 case MIPI_DSI_FMT_RGB888: 3002 data_type = 
static int dsi_enable_video_mode(struct dsi_data *dsi, int vc)
{
	int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
	u8 data_type;
	u16 word_count;

	switch (dsi->pix_fmt) {
	case MIPI_DSI_FMT_RGB888:
		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
		break;
	case MIPI_DSI_FMT_RGB666:
		data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
		break;
	case MIPI_DSI_FMT_RGB565:
		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
		break;
	default:
		return -EINVAL;
	}

	dsi_if_enable(dsi, false);
	dsi_vc_enable(dsi, vc, false);

	/* MODE, 1 = video mode */
	REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 4, 4);

	word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);

	dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, data_type,
				 word_count, 0);

	dsi_vc_enable(dsi, vc, true);
	dsi_if_enable(dsi, true);

	return 0;
}

static void dsi_disable_video_mode(struct dsi_data *dsi, int vc)
{
	dsi_if_enable(dsi, false);
	dsi_vc_enable(dsi, vc, false);

	/* MODE, 0 = command mode */
	REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 0, 4, 4);

	dsi_vc_enable(dsi, vc, true);
	dsi_if_enable(dsi, true);
}

static void dsi_enable_video_output(struct omap_dss_device *dssdev, int vc)
{
	struct dsi_data *dsi = to_dsi_data(dssdev);
	int r;

	r = dsi_init_dispc(dsi);
	if (r) {
		dev_err(dsi->dev, "failed to init dispc!\n");
		return;
	}

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		r = dsi_enable_video_mode(dsi, vc);
		if (r)
			goto err_video_mode;
	}

	r = dss_mgr_enable(&dsi->output);
	if (r)
		goto err_mgr_enable;

	return;

err_mgr_enable:
	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		dsi_if_enable(dsi, false);
		dsi_vc_enable(dsi, vc, false);
	}
err_video_mode:
	dsi_uninit_dispc(dsi);
	dev_err(dsi->dev, "failed to enable DSI encoder!\n");
}

static void dsi_disable_video_output(struct omap_dss_device *dssdev, int vc)
{
	struct dsi_data *dsi = to_dsi_data(dssdev);

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
		dsi_disable_video_mode(dsi, vc);

	dss_mgr_disable(&dsi->output);

	dsi_uninit_dispc(dsi);
}

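/*
 * Sketch of the framing math in dsi_update_screen_dispc() below, with
 * illustrative numbers: an 864-byte line (432 pixels at 16 bpp) and a
 * 1024-byte line buffer give packet_payload = 1024 / 864 * 864 = 864, i.e.
 * one line per packet. Each packet carries one extra DCS command byte, so
 * TE_SIZE counts (bytespf / packet_payload) packets of packet_payload + 1
 * bytes, plus a shorter final packet for any remainder.
 */
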
static void dsi_update_screen_dispc(struct dsi_data *dsi)
{
	unsigned int bytespp;
	unsigned int bytespl;
	unsigned int bytespf;
	unsigned int total_len;
	unsigned int packet_payload;
	unsigned int packet_len;
	u32 l;
	int r;
	const unsigned int vc = dsi->update_vc;
	const unsigned int line_buf_size = dsi->line_buffer_size;
	u16 w = dsi->vm.hactive;
	u16 h = dsi->vm.vactive;

	DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);

	bytespp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
	bytespl = w * bytespp;
	bytespf = bytespl * h;

	/*
	 * NOTE: packet_payload has to be equal to N * bytespl, where N is
	 * number of lines in a packet. See errata about VP_CLK_RATIO.
	 */
	if (bytespf < line_buf_size)
		packet_payload = bytespf;
	else
		packet_payload = line_buf_size / bytespl * bytespl;

	packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
	total_len = (bytespf / packet_payload) * packet_len;

	if (bytespf % packet_payload)
		total_len += (bytespf % packet_payload) + 1;

	l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
	dsi_write_reg(dsi, DSI_VC_TE(vc), l);

	dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel,
				 MIPI_DSI_DCS_LONG_WRITE, packet_len, 0);

	if (dsi->te_enabled)
		l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
	else
		l = FLD_MOD(l, 1, 31, 31); /* TE_START */
	dsi_write_reg(dsi, DSI_VC_TE(vc), l);

	/*
	 * We put SIDLEMODE to no-idle for the duration of the transfer,
	 * because DSS interrupts are not capable of waking up the CPU and the
	 * framedone interrupt could be delayed for quite a long time. I think
	 * the same goes for any DSS interrupts, but for some reason I have not
	 * seen the problem anywhere else than here.
	 */
	dispc_disable_sidle(dsi->dss->dispc);

	dsi_perf_mark_start(dsi);

	r = schedule_delayed_work(&dsi->framedone_timeout_work,
				  msecs_to_jiffies(250));
	BUG_ON(r == 0);

	dss_mgr_start_update(&dsi->output);

	if (dsi->te_enabled) {
		/*
		 * Disable LP_RX_TO, so that we can receive TE. Time to wait
		 * for TE is longer than the timer allows.
		 */
		REG_FLD_MOD(dsi, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */

		dsi_vc_send_bta(dsi, vc);

#ifdef DSI_CATCH_MISSING_TE
		mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
#endif
	}
}

#ifdef DSI_CATCH_MISSING_TE
static void dsi_te_timeout(struct timer_list *unused)
{
	DSSERR("TE not received for 250ms!\n");
}
#endif

static void dsi_handle_framedone(struct dsi_data *dsi, int error)
{
	/* SIDLEMODE back to smart-idle */
	dispc_enable_sidle(dsi->dss->dispc);

	if (dsi->te_enabled) {
		/* enable LP_RX_TO again after the TE */
		REG_FLD_MOD(dsi, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
	}

	dsi_bus_unlock(dsi);

	if (!error)
		dsi_perf_show(dsi, "DISPC");
}

static void dsi_framedone_timeout_work_callback(struct work_struct *work)
{
	struct dsi_data *dsi = container_of(work, struct dsi_data,
					    framedone_timeout_work.work);
	/*
	 * XXX While extremely unlikely, we could get FRAMEDONE interrupt after
	 * 250ms which would conflict with this timeout work. What should be
	 * done is first cancel the transfer on the HW, and then cancel the
	 * possibly scheduled framedone work. However, cancelling the transfer
	 * on the HW is buggy, and would probably require resetting the whole
	 * DSI.
	 */

	DSSERR("Framedone not received for 250ms!\n");

	dsi_handle_framedone(dsi, -ETIMEDOUT);
}

static void dsi_framedone_irq_callback(void *data)
{
	struct dsi_data *dsi = data;

	/*
	 * Note: We get FRAMEDONE when DISPC has finished sending pixels and
	 * turns itself off. However, DSI still has the pixels in its buffers,
	 * and is sending the data.
	 */

	cancel_delayed_work(&dsi->framedone_timeout_work);

	DSSDBG("Framedone received!\n");

	dsi_handle_framedone(dsi, 0);
}

static int _dsi_update(struct dsi_data *dsi)
{
	dsi_perf_mark_setup(dsi);

#ifdef DSI_PERF_MEASURE
	dsi->update_bytes = dsi->vm.hactive * dsi->vm.vactive *
		mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
#endif
	dsi_update_screen_dispc(dsi);

	return 0;
}

static int _dsi_send_nop(struct dsi_data *dsi, int vc, int channel)
{
	const u8 payload[] = { MIPI_DCS_NOP };
	const struct mipi_dsi_msg msg = {
		.channel = channel,
		.type = MIPI_DSI_DCS_SHORT_WRITE,
		.tx_len = 1,
		.tx_buf = payload,
	};

	WARN_ON(!dsi_bus_is_locked(dsi));

	return _omap_dsi_host_transfer(dsi, vc, &msg);
}

static int dsi_update_channel(struct omap_dss_device *dssdev, int vc)
{
	struct dsi_data *dsi = to_dsi_data(dssdev);
	int r;

	dsi_bus_lock(dsi);

	if (!dsi->video_enabled) {
		r = -EIO;
		goto err;
	}

	if (dsi->vm.hactive == 0 || dsi->vm.vactive == 0) {
		r = -EINVAL;
		goto err;
	}

	DSSDBG("dsi_update_channel: %d\n", vc);

	/*
	 * Send NOP between the frames. If we don't send something here, the
	 * updates stop working. This is probably related to DSI spec stating
	 * that the DSI host should transition to LP at least once per frame.
	 */
	r = _dsi_send_nop(dsi, VC_CMD, dsi->dsidev->channel);
	if (r < 0) {
		DSSWARN("failed to send nop between frames: %d\n", r);
		goto err;
	}

	dsi->update_vc = vc;

	if (dsi->te_enabled && dsi->te_gpio) {
		schedule_delayed_work(&dsi->te_timeout_work,
				      msecs_to_jiffies(250));
		atomic_set(&dsi->do_ext_te_update, 1);
	} else {
		_dsi_update(dsi);
	}

	return 0;

err:
	dsi_bus_unlock(dsi);
	return r;
}

static int dsi_update_all(struct omap_dss_device *dssdev)
{
	return dsi_update_channel(dssdev, VC_VIDEO);
}

/* Display funcs */

static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
{
	struct dispc_clock_info dispc_cinfo;
	int r;
	unsigned long fck;

	fck = dsi_get_pll_hsdiv_dispc_rate(dsi);

	dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
	dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;

	r = dispc_calc_clock_rates(dsi->dss->dispc, fck, &dispc_cinfo);
	if (r) {
		DSSERR("Failed to calc dispc clocks\n");
		return r;
	}

	dsi->mgr_config.clock_info = dispc_cinfo;

	return 0;
}

static int dsi_init_dispc(struct dsi_data *dsi)
{
	enum omap_channel dispc_channel = dsi->output.dispc_channel;
	int r;

	dss_select_lcd_clk_source(dsi->dss, dispc_channel,
				  dsi->module_id == 0 ?
				  DSS_CLK_SRC_PLL1_1 : DSS_CLK_SRC_PLL2_1);

	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
		r = dss_mgr_register_framedone_handler(&dsi->output,
						       dsi_framedone_irq_callback,
						       dsi);
		if (r) {
			DSSERR("can't register FRAMEDONE handler\n");
			goto err;
		}

		dsi->mgr_config.stallmode = true;
		dsi->mgr_config.fifohandcheck = true;
	} else {
		dsi->mgr_config.stallmode = false;
		dsi->mgr_config.fifohandcheck = false;
	}

	r = dsi_configure_dispc_clocks(dsi);
	if (r)
		goto err1;

	dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
	dsi->mgr_config.video_port_width =
		mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
	dsi->mgr_config.lcden_sig_polarity = 0;

	dss_mgr_set_lcd_config(&dsi->output, &dsi->mgr_config);

	return 0;
err1:
	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
		dss_mgr_unregister_framedone_handler(&dsi->output,
						     dsi_framedone_irq_callback,
						     dsi);
err:
	dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
	return r;
}

static void dsi_uninit_dispc(struct dsi_data *dsi)
{
	enum omap_channel dispc_channel = dsi->output.dispc_channel;

	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
		dss_mgr_unregister_framedone_handler(&dsi->output,
						     dsi_framedone_irq_callback,
						     dsi);

	dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
}

static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
{
	struct dss_pll_clock_info cinfo;
	int r;

	cinfo = dsi->user_dsi_cinfo;

	r = dss_pll_set_config(&dsi->pll, &cinfo);
	if (r) {
		DSSERR("Failed to set dsi clocks\n");
		return r;
	}

	return 0;
}

static void dsi_setup_dsi_vcs(struct dsi_data *dsi)
{
	/* Setup VC_CMD for LP and cpu transfers */
	REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 9, 9); /* LP */

	REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 1, 1); /* SOURCE_L4 */
	dsi->vc[VC_CMD].source = DSI_VC_SOURCE_L4;

	/* Setup VC_VIDEO for HS and dispc transfers */
	REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 9, 9); /* HS */

	REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 1, 1); /* SOURCE_VP */
	dsi->vc[VC_VIDEO].source = DSI_VC_SOURCE_VP;

	if ((dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) &&
	    !(dsi->dsidev->mode_flags & MIPI_DSI_MODE_VIDEO))
		REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 30, 30); /* DCS_CMD_ENABLE */

	dsi_vc_enable(dsi, VC_CMD, 1);
	dsi_vc_enable(dsi, VC_VIDEO, 1);

	dsi_if_enable(dsi, 1);

	dsi_force_tx_stop_mode_io(dsi);

	/* start the DDR clock by sending a NULL packet */
	if (!(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
		dsi_vc_send_null(dsi, VC_CMD, dsi->dsidev->channel);
}

static int dsi_init_dsi(struct dsi_data *dsi)
{
	int r;

	r = dss_pll_enable(&dsi->pll);
	if (r)
		return r;

	r = dsi_configure_dsi_clocks(dsi);
	if (r)
		goto err0;

	dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
				  dsi->module_id == 0 ?
				  DSS_CLK_SRC_PLL1_2 : DSS_CLK_SRC_PLL2_2);

	DSSDBG("PLL OK\n");

	if (!dsi->vdds_dsi_enabled) {
		r = regulator_enable(dsi->vdds_dsi_reg);
		if (r)
			goto err1;

		dsi->vdds_dsi_enabled = true;
	}

	r = dsi_cio_init(dsi);
	if (r)
		goto err2;

	_dsi_print_reset_status(dsi);

	dsi_proto_timings(dsi);
	dsi_set_lp_clk_divisor(dsi);

	_dsi_print_reset_status(dsi);

	r = dsi_proto_config(dsi);
	if (r)
		goto err3;

	dsi_setup_dsi_vcs(dsi);

	return 0;
err3:
	dsi_cio_uninit(dsi);
err2:
	regulator_disable(dsi->vdds_dsi_reg);
	dsi->vdds_dsi_enabled = false;
err1:
	dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
err0:
	dss_pll_disable(&dsi->pll);

	return r;
}

static void dsi_uninit_dsi(struct dsi_data *dsi)
{
	/* disable interface */
	dsi_if_enable(dsi, 0);
	dsi_vc_enable(dsi, 0, 0);
	dsi_vc_enable(dsi, 1, 0);
	dsi_vc_enable(dsi, 2, 0);
	dsi_vc_enable(dsi, 3, 0);

	dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
	dsi_cio_uninit(dsi);
	dss_pll_disable(&dsi->pll);

	regulator_disable(dsi->vdds_dsi_reg);
	dsi->vdds_dsi_enabled = false;
}

static void dsi_enable(struct dsi_data *dsi)
{
	int r;

	WARN_ON(!dsi_bus_is_locked(dsi));

	if (WARN_ON(dsi->iface_enabled))
		return;

	mutex_lock(&dsi->lock);

	r = dsi_runtime_get(dsi);
	if (r)
		goto err_get_dsi;

	_dsi_initialize_irq(dsi);

	r = dsi_init_dsi(dsi);
	if (r)
		goto err_init_dsi;

	dsi->iface_enabled = true;

	mutex_unlock(&dsi->lock);

	return;

err_init_dsi:
	dsi_runtime_put(dsi);
err_get_dsi:
	mutex_unlock(&dsi->lock);
	DSSDBG("dsi_enable FAILED\n");
}

static void dsi_disable(struct dsi_data *dsi)
{
	WARN_ON(!dsi_bus_is_locked(dsi));

	if (WARN_ON(!dsi->iface_enabled))
		return;

	mutex_lock(&dsi->lock);

	dsi_sync_vc(dsi, 0);
	dsi_sync_vc(dsi, 1);
	dsi_sync_vc(dsi, 2);
	dsi_sync_vc(dsi, 3);

	dsi_uninit_dsi(dsi);

	dsi_runtime_put(dsi);

	dsi->iface_enabled = false;

	mutex_unlock(&dsi->lock);
}

static int dsi_enable_te(struct dsi_data *dsi, bool enable)
{
	dsi->te_enabled = enable;

	if (dsi->te_gpio) {
		if (enable)
			enable_irq(dsi->te_irq);
		else
			disable_irq(dsi->te_irq);
	}

	return 0;
}

#ifdef PRINT_VERBOSE_VM_TIMINGS
static void print_dsi_vm(const char *str,
			 const struct omap_dss_dsi_videomode_timings *t)
{
	unsigned long byteclk = t->hsclk / 4;
	int bl, wc, pps, tot;

	wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
	pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
	bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
	tot = bl + pps;

#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))

	pr_debug("%s bck %lu, %u/%u/%u/%u/%u/%u = %u+%u = %u, "
		 "%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
		 str,
		 byteclk,
		 t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
		 bl, pps, tot,
		 TO_DSI_T(t->hss),
		 TO_DSI_T(t->hsa),
		 TO_DSI_T(t->hse),
		 TO_DSI_T(t->hbp),
		 TO_DSI_T(pps),
		 TO_DSI_T(t->hfp),
		 TO_DSI_T(bl),
		 TO_DSI_T(pps),
		 TO_DSI_T(tot));
#undef TO_DSI_T
}

static void print_dispc_vm(const char *str, const struct videomode *vm)
{
	unsigned long pck = vm->pixelclock;
	int hact, bl, tot;

	hact = vm->hactive;
	bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch;
	tot = hact + bl;

#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))

	pr_debug("%s pck %lu, %u/%u/%u/%u = %u+%u = %u, "
		 "%u/%u/%u/%u = %u + %u = %u\n",
		 str,
		 pck,
		 vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch,
		 bl, hact, tot,
		 TO_DISPC_T(vm->hsync_len),
		 TO_DISPC_T(vm->hback_porch),
		 TO_DISPC_T(hact),
		 TO_DISPC_T(vm->hfront_porch),
		 TO_DISPC_T(bl),
		 TO_DISPC_T(hact),
		 TO_DISPC_T(tot));
#undef TO_DISPC_T
}

/* note: this is not quite accurate */
static void print_dsi_dispc_vm(const char *str,
			       const struct omap_dss_dsi_videomode_timings *t)
{
	struct videomode vm = { 0 };
	unsigned long byteclk = t->hsclk / 4;
	unsigned long pck;
	u64 dsi_tput;
	int dsi_hact, dsi_htot;

	dsi_tput = (u64)byteclk * t->ndl * 8;
	pck = (u32)div64_u64(dsi_tput, t->bitspp);
	dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
	dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;

	vm.pixelclock = pck;
	vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
	vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk);
	vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk);
	vm.hactive = t->hact;

	print_dispc_vm(str, &vm);
}
#endif /* PRINT_VERBOSE_VM_TIMINGS */

static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
				 unsigned long pck, void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;
	struct videomode *vm = &ctx->vm;

	ctx->dispc_cinfo.lck_div = lckd;
	ctx->dispc_cinfo.pck_div = pckd;
	ctx->dispc_cinfo.lck = lck;
	ctx->dispc_cinfo.pck = pck;

	*vm = *ctx->config->vm;
	vm->pixelclock = pck;
	vm->hactive = ctx->config->vm->hactive;
	vm->vactive = ctx->config->vm->vactive;
	vm->hsync_len = vm->hfront_porch = vm->hback_porch = vm->vsync_len = 1;
	vm->vfront_porch = vm->vback_porch = 0;

	return true;
}

static bool dsi_cm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
				 void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;

	ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
	ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;

	return dispc_div_calc(ctx->dsi->dss->dispc, dispc,
			      ctx->req_pck_min, ctx->req_pck_max,
			      dsi_cm_calc_dispc_cb, ctx);
}

static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
			       unsigned long clkdco, void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;
	struct dsi_data *dsi = ctx->dsi;

	ctx->dsi_cinfo.n = n;
	ctx->dsi_cinfo.m = m;
	ctx->dsi_cinfo.fint = fint;
	ctx->dsi_cinfo.clkdco = clkdco;

	return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
				    dsi->data->max_fck_freq,
				    dsi_cm_calc_hsdiv_cb, ctx);
}

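/*
 * The command mode clock calculation below is a nested search:
 * dss_pll_calc_a() iterates PLL n/m candidates into dsi_cm_calc_pll_cb(),
 * which runs dss_pll_hsdiv_calc_a() for HSDIV dividers into
 * dsi_cm_calc_hsdiv_cb(), which in turn runs dispc_div_calc() for the
 * LCD/PCD dividers into dsi_cm_calc_dispc_cb(). The first combination for
 * which every callback returns true is kept in ctx.
 */
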
static bool dsi_cm_calc(struct dsi_data *dsi,
			const struct omap_dss_dsi_config *cfg,
			struct dsi_clk_calc_ctx *ctx)
{
	unsigned long clkin;
	int bitspp, ndl;
	unsigned long pll_min, pll_max;
	unsigned long pck, txbyteclk;

	clkin = clk_get_rate(dsi->pll.clkin);
	bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
	ndl = dsi->num_lanes_used - 1;

	/*
	 * Here we should calculate minimum txbyteclk to be able to send the
	 * frame in time, and also to handle TE. That's not very simple, though,
	 * especially as we go to LP between each pixel packet due to HW
	 * "feature". So let's just estimate very roughly and multiply by 1.5.
	 */
	pck = cfg->vm->pixelclock;
	pck = pck * 3 / 2;
	txbyteclk = pck * bitspp / 8 / ndl;

	memset(ctx, 0, sizeof(*ctx));
	ctx->dsi = dsi;
	ctx->pll = &dsi->pll;
	ctx->config = cfg;
	ctx->req_pck_min = pck;
	ctx->req_pck_nom = pck;
	ctx->req_pck_max = pck * 3 / 2;

	pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
	pll_max = cfg->hs_clk_max * 4;

	return dss_pll_calc_a(ctx->pll, clkin,
			      pll_min, pll_max,
			      dsi_cm_calc_pll_cb, ctx);
}

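/*
 * Rough shape of dsi_vm_calc_blanking() below: verify that DISPC and DSI
 * throughput can match (exactly so when the line buffer is smaller than a
 * line, since then nothing decouples them), derive the DSI total line
 * length in byteclks from the panel's nominal pixel clock, and distribute
 * the remaining blanking between HSA, HBP and HFP, borrowing cycles from
 * HBP and then HSA if HFP would otherwise drop below one.
 */
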
static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
{
	struct dsi_data *dsi = ctx->dsi;
	const struct omap_dss_dsi_config *cfg = ctx->config;
	int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
	int ndl = dsi->num_lanes_used - 1;
	unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4;
	unsigned long byteclk = hsclk / 4;

	unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max;
	int xres;
	int panel_htot, panel_hbl; /* pixels */
	int dispc_htot, dispc_hbl; /* pixels */
	int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
	int hfp, hsa, hbp;
	const struct videomode *req_vm;
	struct videomode *dispc_vm;
	struct omap_dss_dsi_videomode_timings *dsi_vm;
	u64 dsi_tput, dispc_tput;

	dsi_tput = (u64)byteclk * ndl * 8;

	req_vm = cfg->vm;
	req_pck_min = ctx->req_pck_min;
	req_pck_max = ctx->req_pck_max;
	req_pck_nom = ctx->req_pck_nom;

	dispc_pck = ctx->dispc_cinfo.pck;
	dispc_tput = (u64)dispc_pck * bitspp;

	xres = req_vm->hactive;

	panel_hbl = req_vm->hfront_porch + req_vm->hback_porch +
		    req_vm->hsync_len;
	panel_htot = xres + panel_hbl;

	dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);

	/*
	 * When there are no line buffers, DISPC and DSI must have the
	 * same tput. Otherwise DISPC tput needs to be higher than DSI's.
	 */
	if (dsi->line_buffer_size < xres * bitspp / 8) {
		if (dispc_tput != dsi_tput)
			return false;
	} else {
		if (dispc_tput < dsi_tput)
			return false;
	}

	/* DSI tput must be over the min requirement */
	if (dsi_tput < (u64)bitspp * req_pck_min)
		return false;

	/* When non-burst mode, DSI tput must be below max requirement. */
	if (cfg->trans_mode != OMAP_DSS_DSI_BURST_MODE) {
		if (dsi_tput > (u64)bitspp * req_pck_max)
			return false;
	}

	hss = DIV_ROUND_UP(4, ndl);

	if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
		if (ndl == 3 && req_vm->hsync_len == 0)
			hse = 1;
		else
			hse = DIV_ROUND_UP(4, ndl);
	} else {
		hse = 0;
	}

	/* DSI htot to match the panel's nominal pck */
	dsi_htot = div64_u64((u64)panel_htot * byteclk, req_pck_nom);

	/* fail if there would be no time for blanking */
	if (dsi_htot < hss + hse + dsi_hact)
		return false;

	/* total DSI blanking needed to achieve panel's TL */
	dsi_hbl = dsi_htot - dsi_hact;

	/* DISPC htot to match the DSI TL */
	dispc_htot = div64_u64((u64)dsi_htot * dispc_pck, byteclk);

	/* verify that the DSI and DISPC TLs are the same */
	if ((u64)dsi_htot * dispc_pck != (u64)dispc_htot * byteclk)
		return false;

	dispc_hbl = dispc_htot - xres;

	/* setup DSI videomode */

	dsi_vm = &ctx->dsi_vm;
	memset(dsi_vm, 0, sizeof(*dsi_vm));

	dsi_vm->hsclk = hsclk;

	dsi_vm->ndl = ndl;
	dsi_vm->bitspp = bitspp;

	if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
		hsa = 0;
	} else if (ndl == 3 && req_vm->hsync_len == 0) {
		hsa = 0;
	} else {
		hsa = div64_u64((u64)req_vm->hsync_len * byteclk, req_pck_nom);
		hsa = max(hsa - hse, 1);
	}

	hbp = div64_u64((u64)req_vm->hback_porch * byteclk, req_pck_nom);
	hbp = max(hbp, 1);

	hfp = dsi_hbl - (hss + hsa + hse + hbp);
	if (hfp < 1) {
		int t;
		/* we need to take cycles from hbp */

		t = 1 - hfp;
		hbp = max(hbp - t, 1);
		hfp = dsi_hbl - (hss + hsa + hse + hbp);

		if (hfp < 1 && hsa > 0) {
			/* we need to take cycles from hsa */
			t = 1 - hfp;
			hsa = max(hsa - t, 1);
			hfp = dsi_hbl - (hss + hsa + hse + hbp);
		}
	}

	if (hfp < 1)
		return false;

	dsi_vm->hss = hss;
	dsi_vm->hsa = hsa;
	dsi_vm->hse = hse;
	dsi_vm->hbp = hbp;
	dsi_vm->hact = xres;
	dsi_vm->hfp = hfp;

	dsi_vm->vsa = req_vm->vsync_len;
	dsi_vm->vbp = req_vm->vback_porch;
	dsi_vm->vact = req_vm->vactive;
	dsi_vm->vfp = req_vm->vfront_porch;

	dsi_vm->trans_mode = cfg->trans_mode;

	dsi_vm->blanking_mode = 0;
	dsi_vm->hsa_blanking_mode = 1;
	dsi_vm->hfp_blanking_mode = 1;
	dsi_vm->hbp_blanking_mode = 1;

	dsi_vm->window_sync = 4;

	/* setup DISPC videomode */

	dispc_vm = &ctx->vm;
	*dispc_vm = *req_vm;
	dispc_vm->pixelclock = dispc_pck;

	if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
		hsa = div64_u64((u64)req_vm->hsync_len * dispc_pck,
				req_pck_nom);
		hsa = max(hsa, 1);
	} else {
		hsa = 1;
	}

	hbp = div64_u64((u64)req_vm->hback_porch * dispc_pck, req_pck_nom);
	hbp = max(hbp, 1);

	hfp = dispc_hbl - hsa - hbp;
	if (hfp < 1) {
		int t;
		/* we need to take cycles from hbp */

		t = 1 - hfp;
		hbp = max(hbp - t, 1);
		hfp = dispc_hbl - hsa - hbp;

		if (hfp < 1) {
			/* we need to take cycles from hsa */
			t = 1 - hfp;
			hsa = max(hsa - t, 1);
			hfp = dispc_hbl - hsa - hbp;
		}
	}

	if (hfp < 1)
		return false;

	dispc_vm->hfront_porch = hfp;
	dispc_vm->hsync_len = hsa;
	dispc_vm->hback_porch = hbp;

	return true;
}

static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
				 unsigned long pck, void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;

	ctx->dispc_cinfo.lck_div = lckd;
	ctx->dispc_cinfo.pck_div = pckd;
	ctx->dispc_cinfo.lck = lck;
	ctx->dispc_cinfo.pck = pck;

	if (dsi_vm_calc_blanking(ctx) == false)
		return false;

#ifdef PRINT_VERBOSE_VM_TIMINGS
	print_dispc_vm("dispc", &ctx->vm);
	print_dsi_vm("dsi  ", &ctx->dsi_vm);
	print_dispc_vm("req  ", ctx->config->vm);
	print_dsi_dispc_vm("act  ", &ctx->dsi_vm);
#endif

	return true;
}

static bool dsi_vm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
				 void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;
	unsigned long pck_max;

	ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
	ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;

	/*
	 * In burst mode we can let the dispc pck be arbitrarily high, but it
	 * limits our scaling abilities. So for now, don't aim too high.
	 */
	if (ctx->config->trans_mode == OMAP_DSS_DSI_BURST_MODE)
		pck_max = ctx->req_pck_max + 10000000;
	else
		pck_max = ctx->req_pck_max;

	return dispc_div_calc(ctx->dsi->dss->dispc, dispc,
			      ctx->req_pck_min, pck_max,
			      dsi_vm_calc_dispc_cb, ctx);
}

static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
			       unsigned long clkdco, void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;
	struct dsi_data *dsi = ctx->dsi;

	ctx->dsi_cinfo.n = n;
	ctx->dsi_cinfo.m = m;
	ctx->dsi_cinfo.fint = fint;
	ctx->dsi_cinfo.clkdco = clkdco;

	return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
				    dsi->data->max_fck_freq,
				    dsi_vm_calc_hsdiv_cb, ctx);
}

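/*
 * PLL search range used by dsi_vm_calc() below, with clkdco = 4 * hsclk and
 * byteclk = hsclk / 4 (so clkdco = 16 * byteclk): the lower bound is the
 * higher of the panel's hs_clk_min and the byte clock needed for the minimum
 * pixel clock; the upper bound is simply hs_clk_max in burst mode, and the
 * byte clock corresponding to req_pck_max otherwise.
 */
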
static bool dsi_vm_calc(struct dsi_data *dsi,
			const struct omap_dss_dsi_config *cfg,
			struct dsi_clk_calc_ctx *ctx)
{
	const struct videomode *vm = cfg->vm;
	unsigned long clkin;
	unsigned long pll_min;
	unsigned long pll_max;
	int ndl = dsi->num_lanes_used - 1;
	int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
	unsigned long byteclk_min;

	clkin = clk_get_rate(dsi->pll.clkin);

	memset(ctx, 0, sizeof(*ctx));
	ctx->dsi = dsi;
	ctx->pll = &dsi->pll;
	ctx->config = cfg;

	/* these limits should come from the panel driver */
	ctx->req_pck_min = vm->pixelclock - 1000;
	ctx->req_pck_nom = vm->pixelclock;
	ctx->req_pck_max = vm->pixelclock + 1000;

	byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
	pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);

	if (cfg->trans_mode == OMAP_DSS_DSI_BURST_MODE) {
		pll_max = cfg->hs_clk_max * 4;
	} else {
		unsigned long byteclk_max;

		byteclk_max = div64_u64((u64)ctx->req_pck_max * bitspp,
					ndl * 8);

		pll_max = byteclk_max * 4 * 4;
	}

	return dss_pll_calc_a(ctx->pll, clkin,
			      pll_min, pll_max,
			      dsi_vm_calc_pll_cb, ctx);
}

static bool dsi_is_video_mode(struct omap_dss_device *dssdev)
{
	struct dsi_data *dsi = to_dsi_data(dssdev);

	return dsi->mode == OMAP_DSS_DSI_VIDEO_MODE;
}

static int __dsi_calc_config(struct dsi_data *dsi,
			     const struct drm_display_mode *mode,
			     struct dsi_clk_calc_ctx *ctx)
{
	struct omap_dss_dsi_config cfg = dsi->config;
	struct videomode vm;
	bool ok;
	int r;

	drm_display_mode_to_videomode(mode, &vm);

	cfg.vm = &vm;
	cfg.mode = dsi->mode;
	cfg.pixel_format = dsi->pix_fmt;

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
		ok = dsi_vm_calc(dsi, &cfg, ctx);
	else
		ok = dsi_cm_calc(dsi, &cfg, ctx);

	if (!ok)
		return -EINVAL;

	dsi_pll_calc_dsi_fck(dsi, &ctx->dsi_cinfo);

	r = dsi_lp_clock_calc(ctx->dsi_cinfo.clkout[HSDIV_DSI],
			      cfg.lp_clk_min, cfg.lp_clk_max, &ctx->lp_cinfo);
	if (r)
		return r;

	return 0;
}

static int dsi_set_config(struct omap_dss_device *dssdev,
			  const struct drm_display_mode *mode)
{
	struct dsi_data *dsi = to_dsi_data(dssdev);
	struct dsi_clk_calc_ctx ctx;
	int r;

	mutex_lock(&dsi->lock);

	r = __dsi_calc_config(dsi, mode, &ctx);
	if (r) {
		DSSERR("failed to find suitable DSI clock settings\n");
		goto err;
	}

	dsi->user_lp_cinfo = ctx.lp_cinfo;
	dsi->user_dsi_cinfo = ctx.dsi_cinfo;
	dsi->user_dispc_cinfo = ctx.dispc_cinfo;

	dsi->vm = ctx.vm;

	/*
	 * override interlace, logic level and edge related parameters in
	 * videomode with default values
	 */
	dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
	dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
	dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
	dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
	dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
	/*
	 * HACK: These flags should be handled through the omap_dss_device bus
	 * flags, but this will only be possible when the DSI encoder will be
	 * converted to the omapdrm-managed encoder model.
	 */
	dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
	dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
	dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
	dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
	dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
	dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;

	dss_mgr_set_timings(&dsi->output, &dsi->vm);

	dsi->vm_timings = ctx.dsi_vm;

	mutex_unlock(&dsi->lock);

	return 0;
err:
	mutex_unlock(&dsi->lock);

	return r;
}

/*
 * Return a hardcoded dispc channel for the DSI output. This should work for
 * current use cases, but this can be later expanded to either resolve
 * the channel in some more dynamic manner, or get the channel as a user
 * parameter.
 */
4153 */ 4154 static enum omap_channel dsi_get_dispc_channel(struct dsi_data *dsi) 4155 { 4156 switch (dsi->data->model) { 4157 case DSI_MODEL_OMAP3: 4158 return OMAP_DSS_CHANNEL_LCD; 4159 4160 case DSI_MODEL_OMAP4: 4161 switch (dsi->module_id) { 4162 case 0: 4163 return OMAP_DSS_CHANNEL_LCD; 4164 case 1: 4165 return OMAP_DSS_CHANNEL_LCD2; 4166 default: 4167 DSSWARN("unsupported module id\n"); 4168 return OMAP_DSS_CHANNEL_LCD; 4169 } 4170 4171 case DSI_MODEL_OMAP5: 4172 switch (dsi->module_id) { 4173 case 0: 4174 return OMAP_DSS_CHANNEL_LCD; 4175 case 1: 4176 return OMAP_DSS_CHANNEL_LCD3; 4177 default: 4178 DSSWARN("unsupported module id\n"); 4179 return OMAP_DSS_CHANNEL_LCD; 4180 } 4181 4182 default: 4183 DSSWARN("unsupported DSS version\n"); 4184 return OMAP_DSS_CHANNEL_LCD; 4185 } 4186 } 4187 4188 static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc, 4189 const struct mipi_dsi_msg *msg) 4190 { 4191 struct omap_dss_device *dssdev = &dsi->output; 4192 int r; 4193 4194 dsi_vc_enable_hs(dssdev, vc, !(msg->flags & MIPI_DSI_MSG_USE_LPM)); 4195 4196 switch (msg->type) { 4197 case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: 4198 case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: 4199 case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: 4200 case MIPI_DSI_GENERIC_LONG_WRITE: 4201 case MIPI_DSI_DCS_SHORT_WRITE: 4202 case MIPI_DSI_DCS_SHORT_WRITE_PARAM: 4203 case MIPI_DSI_DCS_LONG_WRITE: 4204 case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE: 4205 case MIPI_DSI_NULL_PACKET: 4206 r = dsi_vc_write_common(dssdev, vc, msg); 4207 break; 4208 case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: 4209 case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: 4210 case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: 4211 r = dsi_vc_generic_read(dssdev, vc, msg); 4212 break; 4213 case MIPI_DSI_DCS_READ: 4214 r = dsi_vc_dcs_read(dssdev, vc, msg); 4215 break; 4216 default: 4217 r = -EINVAL; 4218 break; 4219 } 4220 4221 if (r < 0) 4222 return r; 4223 4224 if (msg->type == MIPI_DSI_DCS_SHORT_WRITE || 4225 msg->type == MIPI_DSI_DCS_SHORT_WRITE_PARAM) { 4226 u8 cmd = ((u8 *)msg->tx_buf)[0]; 4227 4228 if (cmd == MIPI_DCS_SET_TEAR_OFF) 4229 dsi_enable_te(dsi, false); 4230 else if (cmd == MIPI_DCS_SET_TEAR_ON) 4231 dsi_enable_te(dsi, true); 4232 } 4233 4234 return 0; 4235 } 4236 4237 static ssize_t omap_dsi_host_transfer(struct mipi_dsi_host *host, 4238 const struct mipi_dsi_msg *msg) 4239 { 4240 struct dsi_data *dsi = host_to_omap(host); 4241 int r; 4242 int vc = VC_CMD; 4243 4244 dsi_bus_lock(dsi); 4245 4246 if (!dsi->iface_enabled) { 4247 dsi_enable(dsi); 4248 schedule_delayed_work(&dsi->dsi_disable_work, msecs_to_jiffies(2000)); 4249 } 4250 4251 r = _omap_dsi_host_transfer(dsi, vc, msg); 4252 4253 dsi_bus_unlock(dsi); 4254 4255 return r; 4256 } 4257 4258 static int dsi_get_clocks(struct dsi_data *dsi) 4259 { 4260 struct clk *clk; 4261 4262 clk = devm_clk_get(dsi->dev, "fck"); 4263 if (IS_ERR(clk)) { 4264 DSSERR("can't get fck\n"); 4265 return PTR_ERR(clk); 4266 } 4267 4268 dsi->dss_clk = clk; 4269 4270 return 0; 4271 } 4272 4273 static const struct omapdss_dsi_ops dsi_ops = { 4274 .update = dsi_update_all, 4275 .is_video_mode = dsi_is_video_mode, 4276 }; 4277 4278 static irqreturn_t omap_dsi_te_irq_handler(int irq, void *dev_id) 4279 { 4280 struct dsi_data *dsi = (struct dsi_data *)dev_id; 4281 int old; 4282 4283 old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0); 4284 if (old) { 4285 cancel_delayed_work(&dsi->te_timeout_work); 4286 _dsi_update(dsi); 4287 } 4288 4289 return IRQ_HANDLED; 4290 } 4291 4292 static void omap_dsi_te_timeout_work_callback(struct 

static ssize_t omap_dsi_host_transfer(struct mipi_dsi_host *host,
				      const struct mipi_dsi_msg *msg)
{
	struct dsi_data *dsi = host_to_omap(host);
	int r;
	int vc = VC_CMD;

	dsi_bus_lock(dsi);

	if (!dsi->iface_enabled) {
		dsi_enable(dsi);
		schedule_delayed_work(&dsi->dsi_disable_work,
				      msecs_to_jiffies(2000));
	}

	r = _omap_dsi_host_transfer(dsi, vc, msg);

	dsi_bus_unlock(dsi);

	return r;
}

static int dsi_get_clocks(struct dsi_data *dsi)
{
	struct clk *clk;

	clk = devm_clk_get(dsi->dev, "fck");
	if (IS_ERR(clk)) {
		DSSERR("can't get fck\n");
		return PTR_ERR(clk);
	}

	dsi->dss_clk = clk;

	return 0;
}

static const struct omapdss_dsi_ops dsi_ops = {
	.update = dsi_update_all,
	.is_video_mode = dsi_is_video_mode,
};

static irqreturn_t omap_dsi_te_irq_handler(int irq, void *dev_id)
{
	struct dsi_data *dsi = (struct dsi_data *)dev_id;
	int old;

	old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
	if (old) {
		cancel_delayed_work(&dsi->te_timeout_work);
		_dsi_update(dsi);
	}

	return IRQ_HANDLED;
}

static void omap_dsi_te_timeout_work_callback(struct work_struct *work)
{
	struct dsi_data *dsi =
		container_of(work, struct dsi_data, te_timeout_work.work);
	int old;

	old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
	if (old) {
		dev_err(dsi->dev, "TE not received for 250ms!\n");
		_dsi_update(dsi);
	}
}

static int omap_dsi_register_te_irq(struct dsi_data *dsi,
				    struct mipi_dsi_device *client)
{
	int err;
	int te_irq;

	/* The "te" consumer id resolves to the "te-gpios" DT property. */
	dsi->te_gpio = gpiod_get(&client->dev, "te", GPIOD_IN);
	if (IS_ERR(dsi->te_gpio)) {
		err = PTR_ERR(dsi->te_gpio);

		if (err == -ENOENT) {
			dsi->te_gpio = NULL;
			return 0;
		}

		dev_err(dsi->dev, "Could not get TE gpio: %d\n", err);
		return err;
	}

	te_irq = gpiod_to_irq(dsi->te_gpio);
	if (te_irq < 0) {
		gpiod_put(dsi->te_gpio);
		dsi->te_gpio = NULL;
		return -EINVAL;
	}

	dsi->te_irq = te_irq;

	irq_set_status_flags(te_irq, IRQ_NOAUTOEN);

	err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "TE", dsi);
	if (err) {
		dev_err(dsi->dev, "request irq failed with %d\n", err);
		gpiod_put(dsi->te_gpio);
		dsi->te_gpio = NULL;
		return err;
	}

	INIT_DEFERRABLE_WORK(&dsi->te_timeout_work,
			     omap_dsi_te_timeout_work_callback);

	dev_dbg(dsi->dev, "Using GPIO TE\n");

	return 0;
}

static void omap_dsi_unregister_te_irq(struct dsi_data *dsi)
{
	if (dsi->te_gpio) {
		free_irq(dsi->te_irq, dsi);
		cancel_delayed_work(&dsi->te_timeout_work);
		gpiod_put(dsi->te_gpio);
		dsi->te_gpio = NULL;
	}
}
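
/*
 * Illustrative (hypothetical) DT fragment for the external TE interrupt:
 * gpiod_get() in omap_dsi_register_te_irq() looks the line up in the DSI
 * peripheral (client) node, so a panel would describe it as:
 *
 *	panel@0 {
 *		compatible = "vendor,panel";
 *		reg = <0>;
 *		te-gpios = <&gpio4 18 GPIO_ACTIVE_HIGH>;
 *	};
 *
 * The GPIO controller, line number and flags above are made-up example
 * values, not taken from any real board file.
 */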

static int omap_dsi_host_attach(struct mipi_dsi_host *host,
				struct mipi_dsi_device *client)
{
	struct dsi_data *dsi = host_to_omap(host);
	int r;

	if (dsi->dsidev) {
		DSSERR("dsi client already attached\n");
		return -EBUSY;
	}

	if (mipi_dsi_pixel_format_to_bpp(client->format) < 0) {
		DSSERR("invalid pixel format\n");
		return -EINVAL;
	}

	atomic_set(&dsi->do_ext_te_update, 0);

	if (client->mode_flags & MIPI_DSI_MODE_VIDEO) {
		dsi->mode = OMAP_DSS_DSI_VIDEO_MODE;
	} else {
		r = omap_dsi_register_te_irq(dsi, client);
		if (r)
			return r;

		dsi->mode = OMAP_DSS_DSI_CMD_MODE;
	}

	dsi->dsidev = client;
	dsi->pix_fmt = client->format;

	dsi->config.hs_clk_min = 150000000; // TODO: get from client?
	dsi->config.hs_clk_max = client->hs_rate;
	dsi->config.lp_clk_min = 7000000; // TODO: get from client?
	dsi->config.lp_clk_max = client->lp_rate;

	if (client->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
		dsi->config.trans_mode = OMAP_DSS_DSI_BURST_MODE;
	else if (client->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		dsi->config.trans_mode = OMAP_DSS_DSI_PULSE_MODE;
	else
		dsi->config.trans_mode = OMAP_DSS_DSI_EVENT_MODE;

	return 0;
}

static int omap_dsi_host_detach(struct mipi_dsi_host *host,
				struct mipi_dsi_device *client)
{
	struct dsi_data *dsi = host_to_omap(host);

	if (WARN_ON(dsi->dsidev != client))
		return -EINVAL;

	cancel_delayed_work_sync(&dsi->dsi_disable_work);

	dsi_bus_lock(dsi);

	if (dsi->iface_enabled)
		dsi_disable(dsi);

	dsi_bus_unlock(dsi);

	omap_dsi_unregister_te_irq(dsi);
	dsi->dsidev = NULL;
	return 0;
}

static const struct mipi_dsi_host_ops omap_dsi_host_ops = {
	.attach = omap_dsi_host_attach,
	.detach = omap_dsi_host_detach,
	.transfer = omap_dsi_host_transfer,
};

/* -----------------------------------------------------------------------------
 * PLL
 */

static const struct dss_pll_ops dsi_pll_ops = {
	.enable = dsi_pll_enable,
	.disable = dsi_pll_disable,
	.set_config = dss_pll_write_config_type_a,
};

static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
	.type = DSS_PLL_TYPE_A,

	.n_max = (1 << 7) - 1,
	.m_max = (1 << 11) - 1,
	.mX_max = (1 << 4) - 1,
	.fint_min = 750000,
	.fint_max = 2100000,
	.clkdco_low = 1000000000,
	.clkdco_max = 1800000000,

	.n_msb = 7,
	.n_lsb = 1,
	.m_msb = 18,
	.m_lsb = 8,

	.mX_msb[0] = 22,
	.mX_lsb[0] = 19,
	.mX_msb[1] = 26,
	.mX_lsb[1] = 23,

	.has_stopmode = true,
	.has_freqsel = true,
	.has_selfreqdco = false,
	.has_refsel = false,
};

static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
	.type = DSS_PLL_TYPE_A,

	.n_max = (1 << 8) - 1,
	.m_max = (1 << 12) - 1,
	.mX_max = (1 << 5) - 1,
	.fint_min = 500000,
	.fint_max = 2500000,
	.clkdco_low = 1000000000,
	.clkdco_max = 1800000000,

	.n_msb = 8,
	.n_lsb = 1,
	.m_msb = 20,
	.m_lsb = 9,

	.mX_msb[0] = 25,
	.mX_lsb[0] = 21,
	.mX_msb[1] = 30,
	.mX_lsb[1] = 26,

	.has_stopmode = true,
	.has_freqsel = false,
	.has_selfreqdco = false,
	.has_refsel = false,
};

static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
	.type = DSS_PLL_TYPE_A,

	.n_max = (1 << 8) - 1,
	.m_max = (1 << 12) - 1,
	.mX_max = (1 << 5) - 1,
	.fint_min = 150000,
	.fint_max = 52000000,
	.clkdco_low = 1000000000,
	.clkdco_max = 1800000000,

	.n_msb = 8,
	.n_lsb = 1,
	.m_msb = 20,
	.m_lsb = 9,

	.mX_msb[0] = 25,
	.mX_lsb[0] = 21,
	.mX_msb[1] = 30,
	.mX_lsb[1] = 26,

	.has_stopmode = true,
	.has_freqsel = false,
	.has_selfreqdco = true,
	.has_refsel = true,
};
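
/*
 * Worked example (illustrative, not from the TRM): for a type A PLL the DCO
 * frequency is clkdco = 2 * m * fint, with fint = clkin / n, and each HSDIV
 * output is clkdco / mX (see dss_pll_calc_a() in pll.c for the actual
 * search). On an OMAP4 with a 38.4 MHz sys_clk, one valid choice would be:
 *
 *	n = 16  -> fint = 38.4 MHz / 16 = 2.4 MHz   (within 0.5..2.5 MHz)
 *	m = 375 -> clkdco = 2 * 375 * 2.4 MHz = 1.8 GHz  (within 1.0..1.8 GHz)
 *	mX = 2  -> HSDIV output = 1.8 GHz / 2 = 900 MHz
 *
 * All values fit the field widths declared above for OMAP4 (n_max = 255,
 * m_max = 4095, mX_max = 31).
 */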

static int dsi_init_pll_data(struct dss_device *dss, struct dsi_data *dsi)
{
	struct dss_pll *pll = &dsi->pll;
	struct clk *clk;
	int r;

	clk = devm_clk_get(dsi->dev, "sys_clk");
	if (IS_ERR(clk)) {
		DSSERR("can't get sys_clk\n");
		return PTR_ERR(clk);
	}

	pll->name = dsi->module_id == 0 ? "dsi0" : "dsi1";
	pll->id = dsi->module_id == 0 ? DSS_PLL_DSI1 : DSS_PLL_DSI2;
	pll->clkin = clk;
	pll->base = dsi->pll_base;
	pll->hw = dsi->data->pll_hw;
	pll->ops = &dsi_pll_ops;

	r = dss_pll_register(dss, pll);
	if (r)
		return r;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Component Bind & Unbind
 */

static int dsi_bind(struct device *dev, struct device *master, void *data)
{
	struct dss_device *dss = dss_get_device(master);
	struct dsi_data *dsi = dev_get_drvdata(dev);
	char name[10];
	u32 rev;
	int r;

	dsi->dss = dss;

	r = dsi_init_pll_data(dss, dsi);
	if (r) {
		DSSERR("failed to init pll data: %d\n", r);
		return r;
	}

	r = dsi_runtime_get(dsi);
	if (r)
		return r;

	rev = dsi_read_reg(dsi, DSI_REVISION);
	dev_dbg(dev, "OMAP DSI rev %d.%d\n",
		FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));

	dsi->line_buffer_size = dsi_get_line_buf_size(dsi);

	dsi_runtime_put(dsi);

	snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
	dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
						    dsi_dump_dsi_regs, dsi);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
	dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
						    dsi_dump_dsi_irqs, dsi);
#endif
	snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
	dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
						    dsi_dump_dsi_clocks, dsi);

	return 0;
}

static void dsi_unbind(struct device *dev, struct device *master, void *data)
{
	struct dsi_data *dsi = dev_get_drvdata(dev);

	dss_debugfs_remove_file(dsi->debugfs.clks);
	dss_debugfs_remove_file(dsi->debugfs.irqs);
	dss_debugfs_remove_file(dsi->debugfs.regs);

	WARN_ON(dsi->scp_clk_refcount > 0);

	dss_pll_unregister(&dsi->pll);
}

static const struct component_ops dsi_component_ops = {
	.bind = dsi_bind,
	.unbind = dsi_unbind,
};

/* -----------------------------------------------------------------------------
 * DRM Bridge Operations
 */

static int dsi_bridge_attach(struct drm_bridge *bridge,
			     enum drm_bridge_attach_flags flags)
{
	struct dsi_data *dsi = drm_bridge_to_dsi(bridge);

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
		return -EINVAL;

	return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
				 bridge, flags);
}

static enum drm_mode_status
dsi_bridge_mode_valid(struct drm_bridge *bridge,
		      const struct drm_display_info *info,
		      const struct drm_display_mode *mode)
{
	struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
	struct dsi_clk_calc_ctx ctx;
	int r;

	mutex_lock(&dsi->lock);
	r = __dsi_calc_config(dsi, mode, &ctx);
	mutex_unlock(&dsi->lock);

	return r ? MODE_CLOCK_RANGE : MODE_OK;
}

static void dsi_bridge_mode_set(struct drm_bridge *bridge,
				const struct drm_display_mode *mode,
				const struct drm_display_mode *adjusted_mode)
{
	struct dsi_data *dsi = drm_bridge_to_dsi(bridge);

	dsi_set_config(&dsi->output, adjusted_mode);
}
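
/*
 * Power management of the interface is lazy: omap_dsi_host_transfer()
 * enables it on demand and arms dsi_disable_work with a 2 s timeout so an
 * idle command-mode interface is turned off again, while the bridge paths
 * below cancel that work before touching the video output. Roughly:
 *
 *	omap_dsi_host_transfer() -> dsi_enable() + schedule dsi_disable_work
 *	dsi_bridge_enable()      -> cancel work, enable video output
 *	dsi_disable_work         -> dsi_disable() only if video is not enabled
 */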

static void dsi_bridge_enable(struct drm_bridge *bridge)
{
	struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
	struct omap_dss_device *dssdev = &dsi->output;

	cancel_delayed_work_sync(&dsi->dsi_disable_work);

	dsi_bus_lock(dsi);

	if (!dsi->iface_enabled)
		dsi_enable(dsi);

	dsi_enable_video_output(dssdev, VC_VIDEO);

	dsi->video_enabled = true;

	dsi_bus_unlock(dsi);
}

static void dsi_bridge_disable(struct drm_bridge *bridge)
{
	struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
	struct omap_dss_device *dssdev = &dsi->output;

	cancel_delayed_work_sync(&dsi->dsi_disable_work);

	dsi_bus_lock(dsi);

	dsi->video_enabled = false;

	dsi_disable_video_output(dssdev, VC_VIDEO);

	dsi_disable(dsi);

	dsi_bus_unlock(dsi);
}

static const struct drm_bridge_funcs dsi_bridge_funcs = {
	.attach = dsi_bridge_attach,
	.mode_valid = dsi_bridge_mode_valid,
	.mode_set = dsi_bridge_mode_set,
	.enable = dsi_bridge_enable,
	.disable = dsi_bridge_disable,
};

static void dsi_bridge_init(struct dsi_data *dsi)
{
	dsi->bridge.funcs = &dsi_bridge_funcs;
	dsi->bridge.of_node = dsi->host.dev->of_node;
	dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;

	drm_bridge_add(&dsi->bridge);
}

static void dsi_bridge_cleanup(struct dsi_data *dsi)
{
	drm_bridge_remove(&dsi->bridge);
}

/* -----------------------------------------------------------------------------
 * Probe & Remove, Suspend & Resume
 */

static int dsi_init_output(struct dsi_data *dsi)
{
	struct omap_dss_device *out = &dsi->output;
	int r;

	dsi_bridge_init(dsi);

	out->dev = dsi->dev;
	out->id = dsi->module_id == 0 ?
			OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;

	out->type = OMAP_DISPLAY_TYPE_DSI;
	out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
	out->dispc_channel = dsi_get_dispc_channel(dsi);
	out->dsi_ops = &dsi_ops;
	out->of_port = 0;
	out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
		       | DRM_BUS_FLAG_DE_HIGH
		       | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;

	r = omapdss_device_init_output(out, &dsi->bridge);
	if (r < 0) {
		dsi_bridge_cleanup(dsi);
		return r;
	}

	omapdss_device_register(out);

	return 0;
}

static void dsi_uninit_output(struct dsi_data *dsi)
{
	struct omap_dss_device *out = &dsi->output;

	omapdss_device_unregister(out);
	omapdss_device_cleanup_output(out);
	dsi_bridge_cleanup(dsi);
}
"dsi.0" : "dsi.1"; 4732 out->dispc_channel = dsi_get_dispc_channel(dsi); 4733 out->dsi_ops = &dsi_ops; 4734 out->of_port = 0; 4735 out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE 4736 | DRM_BUS_FLAG_DE_HIGH 4737 | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE; 4738 4739 r = omapdss_device_init_output(out, &dsi->bridge); 4740 if (r < 0) { 4741 dsi_bridge_cleanup(dsi); 4742 return r; 4743 } 4744 4745 omapdss_device_register(out); 4746 4747 return 0; 4748 } 4749 4750 static void dsi_uninit_output(struct dsi_data *dsi) 4751 { 4752 struct omap_dss_device *out = &dsi->output; 4753 4754 omapdss_device_unregister(out); 4755 omapdss_device_cleanup_output(out); 4756 dsi_bridge_cleanup(dsi); 4757 } 4758 4759 static int dsi_probe_of(struct dsi_data *dsi) 4760 { 4761 struct device_node *node = dsi->dev->of_node; 4762 struct property *prop; 4763 u32 lane_arr[10]; 4764 int len, num_pins; 4765 int r; 4766 struct device_node *ep; 4767 4768 ep = of_graph_get_endpoint_by_regs(node, 0, 0); 4769 if (!ep) 4770 return 0; 4771 4772 prop = of_find_property(ep, "lanes", &len); 4773 if (prop == NULL) { 4774 dev_err(dsi->dev, "failed to find lane data\n"); 4775 r = -EINVAL; 4776 goto err; 4777 } 4778 4779 num_pins = len / sizeof(u32); 4780 4781 if (num_pins < 4 || num_pins % 2 != 0 || 4782 num_pins > dsi->num_lanes_supported * 2) { 4783 dev_err(dsi->dev, "bad number of lanes\n"); 4784 r = -EINVAL; 4785 goto err; 4786 } 4787 4788 r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins); 4789 if (r) { 4790 dev_err(dsi->dev, "failed to read lane data\n"); 4791 goto err; 4792 } 4793 4794 r = dsi_configure_pins(dsi, num_pins, lane_arr); 4795 if (r) { 4796 dev_err(dsi->dev, "failed to configure pins"); 4797 goto err; 4798 } 4799 4800 of_node_put(ep); 4801 4802 return 0; 4803 4804 err: 4805 of_node_put(ep); 4806 return r; 4807 } 4808 4809 static const struct dsi_of_data dsi_of_data_omap34xx = { 4810 .model = DSI_MODEL_OMAP3, 4811 .pll_hw = &dss_omap3_dsi_pll_hw, 4812 .modules = (const struct dsi_module_id_data[]) { 4813 { .address = 0x4804fc00, .id = 0, }, 4814 { }, 4815 }, 4816 .max_fck_freq = 173000000, 4817 .max_pll_lpdiv = (1 << 13) - 1, 4818 .quirks = DSI_QUIRK_REVERSE_TXCLKESC, 4819 }; 4820 4821 static const struct dsi_of_data dsi_of_data_omap36xx = { 4822 .model = DSI_MODEL_OMAP3, 4823 .pll_hw = &dss_omap3_dsi_pll_hw, 4824 .modules = (const struct dsi_module_id_data[]) { 4825 { .address = 0x4804fc00, .id = 0, }, 4826 { }, 4827 }, 4828 .max_fck_freq = 173000000, 4829 .max_pll_lpdiv = (1 << 13) - 1, 4830 .quirks = DSI_QUIRK_PLL_PWR_BUG, 4831 }; 4832 4833 static const struct dsi_of_data dsi_of_data_omap4 = { 4834 .model = DSI_MODEL_OMAP4, 4835 .pll_hw = &dss_omap4_dsi_pll_hw, 4836 .modules = (const struct dsi_module_id_data[]) { 4837 { .address = 0x58004000, .id = 0, }, 4838 { .address = 0x58005000, .id = 1, }, 4839 { }, 4840 }, 4841 .max_fck_freq = 170000000, 4842 .max_pll_lpdiv = (1 << 13) - 1, 4843 .quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH 4844 | DSI_QUIRK_GNQ, 4845 }; 4846 4847 static const struct dsi_of_data dsi_of_data_omap5 = { 4848 .model = DSI_MODEL_OMAP5, 4849 .pll_hw = &dss_omap5_dsi_pll_hw, 4850 .modules = (const struct dsi_module_id_data[]) { 4851 { .address = 0x58004000, .id = 0, }, 4852 { .address = 0x58009000, .id = 1, }, 4853 { }, 4854 }, 4855 .max_fck_freq = 209250000, 4856 .max_pll_lpdiv = (1 << 13) - 1, 4857 .quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH 4858 | DSI_QUIRK_GNQ | DSI_QUIRK_PHY_DCC, 4859 }; 4860 4861 static const struct of_device_id dsi_of_match[] = { 

static const struct dsi_of_data dsi_of_data_omap34xx = {
	.model = DSI_MODEL_OMAP3,
	.pll_hw = &dss_omap3_dsi_pll_hw,
	.modules = (const struct dsi_module_id_data[]) {
		{ .address = 0x4804fc00, .id = 0, },
		{ },
	},
	.max_fck_freq = 173000000,
	.max_pll_lpdiv = (1 << 13) - 1,
	.quirks = DSI_QUIRK_REVERSE_TXCLKESC,
};

static const struct dsi_of_data dsi_of_data_omap36xx = {
	.model = DSI_MODEL_OMAP3,
	.pll_hw = &dss_omap3_dsi_pll_hw,
	.modules = (const struct dsi_module_id_data[]) {
		{ .address = 0x4804fc00, .id = 0, },
		{ },
	},
	.max_fck_freq = 173000000,
	.max_pll_lpdiv = (1 << 13) - 1,
	.quirks = DSI_QUIRK_PLL_PWR_BUG,
};

static const struct dsi_of_data dsi_of_data_omap4 = {
	.model = DSI_MODEL_OMAP4,
	.pll_hw = &dss_omap4_dsi_pll_hw,
	.modules = (const struct dsi_module_id_data[]) {
		{ .address = 0x58004000, .id = 0, },
		{ .address = 0x58005000, .id = 1, },
		{ },
	},
	.max_fck_freq = 170000000,
	.max_pll_lpdiv = (1 << 13) - 1,
	.quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH
		| DSI_QUIRK_GNQ,
};

static const struct dsi_of_data dsi_of_data_omap5 = {
	.model = DSI_MODEL_OMAP5,
	.pll_hw = &dss_omap5_dsi_pll_hw,
	.modules = (const struct dsi_module_id_data[]) {
		{ .address = 0x58004000, .id = 0, },
		{ .address = 0x58009000, .id = 1, },
		{ },
	},
	.max_fck_freq = 209250000,
	.max_pll_lpdiv = (1 << 13) - 1,
	.quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH
		| DSI_QUIRK_GNQ | DSI_QUIRK_PHY_DCC,
};

static const struct of_device_id dsi_of_match[] = {
	{ .compatible = "ti,omap3-dsi", .data = &dsi_of_data_omap36xx, },
	{ .compatible = "ti,omap4-dsi", .data = &dsi_of_data_omap4, },
	{ .compatible = "ti,omap5-dsi", .data = &dsi_of_data_omap5, },
	{},
};

static const struct soc_device_attribute dsi_soc_devices[] = {
	{ .machine = "OMAP3[45]*", .data = &dsi_of_data_omap34xx },
	{ .machine = "AM35*",      .data = &dsi_of_data_omap34xx },
	{ /* sentinel */ }
};
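
/*
 * Device-tree matching alone cannot tell OMAP34xx from OMAP36xx, as both
 * use the "ti,omap3-dsi" compatible. dsi_probe() therefore consults
 * soc_device_match() first and falls back to the of_match data only when no
 * SoC entry applies: the glob patterns above steer OMAP34xx/AM35xx boards
 * to dsi_of_data_omap34xx (reversed TXCLKESC quirk), while any other
 * "ti,omap3-dsi" device gets the OMAP36xx data (PLL power bug quirk).
 */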

static void omap_dsi_disable_work_callback(struct work_struct *work)
{
	struct dsi_data *dsi = container_of(work, struct dsi_data,
					    dsi_disable_work.work);

	dsi_bus_lock(dsi);

	if (dsi->iface_enabled && !dsi->video_enabled)
		dsi_disable(dsi);

	dsi_bus_unlock(dsi);
}

static int dsi_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *soc;
	const struct dsi_module_id_data *d;
	struct device *dev = &pdev->dev;
	struct dsi_data *dsi;
	struct resource *dsi_mem;
	unsigned int i;
	int r;

	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	dsi->dev = dev;
	dev_set_drvdata(dev, dsi);

	spin_lock_init(&dsi->irq_lock);
	spin_lock_init(&dsi->errors_lock);
	dsi->errors = 0;

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spin_lock_init(&dsi->irq_stats_lock);
	dsi->irq_stats.last_reset = jiffies;
#endif

	mutex_init(&dsi->lock);
	sema_init(&dsi->bus_lock, 1);

	INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
			     dsi_framedone_timeout_work_callback);

	INIT_DEFERRABLE_WORK(&dsi->dsi_disable_work,
			     omap_dsi_disable_work_callback);

#ifdef DSI_CATCH_MISSING_TE
	timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif

	dsi_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proto");
	dsi->proto_base = devm_ioremap_resource(dev, dsi_mem);
	if (IS_ERR(dsi->proto_base))
		return PTR_ERR(dsi->proto_base);

	dsi->phy_base = devm_platform_ioremap_resource_byname(pdev, "phy");
	if (IS_ERR(dsi->phy_base))
		return PTR_ERR(dsi->phy_base);

	dsi->pll_base = devm_platform_ioremap_resource_byname(pdev, "pll");
	if (IS_ERR(dsi->pll_base))
		return PTR_ERR(dsi->pll_base);

	dsi->irq = platform_get_irq(pdev, 0);
	if (dsi->irq < 0) {
		DSSERR("platform_get_irq failed\n");
		return -ENODEV;
	}

	r = devm_request_irq(dev, dsi->irq, omap_dsi_irq_handler,
			     IRQF_SHARED, dev_name(dev), dsi);
	if (r < 0) {
		DSSERR("request_irq failed\n");
		return r;
	}

	dsi->vdds_dsi_reg = devm_regulator_get(dev, "vdd");
	if (IS_ERR(dsi->vdds_dsi_reg)) {
		if (PTR_ERR(dsi->vdds_dsi_reg) != -EPROBE_DEFER)
			DSSERR("can't get DSI VDD regulator\n");
		return PTR_ERR(dsi->vdds_dsi_reg);
	}

	soc = soc_device_match(dsi_soc_devices);
	if (soc)
		dsi->data = soc->data;
	else
		dsi->data = of_match_node(dsi_of_match, dev->of_node)->data;

	d = dsi->data->modules;
	while (d->address != 0 && d->address != dsi_mem->start)
		d++;

	if (d->address == 0) {
		DSSERR("unsupported DSI module\n");
		return -ENODEV;
	}

	dsi->module_id = d->id;

	if (dsi->data->model == DSI_MODEL_OMAP4 ||
	    dsi->data->model == DSI_MODEL_OMAP5) {
		struct device_node *np;

		/*
		 * The OMAP4/5 display DT bindings don't reference the padconf
		 * syscon. Our only option to retrieve it is to find it by
		 * name.
		 */
		np = of_find_node_by_name(NULL,
			dsi->data->model == DSI_MODEL_OMAP4 ?
			"omap4_padconf_global" : "omap5_padconf_global");
		if (!np)
			return -ENODEV;

		dsi->syscon = syscon_node_to_regmap(np);
		of_node_put(np);
	}

	/* DSI VCs initialization */
	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++)
		dsi->vc[i].source = DSI_VC_SOURCE_L4;

	r = dsi_get_clocks(dsi);
	if (r)
		return r;

	pm_runtime_enable(dev);

	/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
	 * of data lanes to 3 by default */
	if (dsi->data->quirks & DSI_QUIRK_GNQ) {
		dsi_runtime_get(dsi);
		/* NB_DATA_LANES */
		dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
		dsi_runtime_put(dsi);
	} else {
		dsi->num_lanes_supported = 3;
	}

	dsi->host.ops = &omap_dsi_host_ops;
	dsi->host.dev = &pdev->dev;

	r = dsi_probe_of(dsi);
	if (r) {
		DSSERR("Invalid DSI DT data\n");
		goto err_pm_disable;
	}

	r = mipi_dsi_host_register(&dsi->host);
	if (r < 0) {
		dev_err(&pdev->dev, "failed to register DSI host: %d\n", r);
		goto err_pm_disable;
	}

	r = dsi_init_output(dsi);
	if (r)
		goto err_dsi_host_unregister;

	r = component_add(&pdev->dev, &dsi_component_ops);
	if (r)
		goto err_uninit_output;

	return 0;

err_uninit_output:
	dsi_uninit_output(dsi);
err_dsi_host_unregister:
	mipi_dsi_host_unregister(&dsi->host);
err_pm_disable:
	pm_runtime_disable(dev);
	return r;
}

static void dsi_remove(struct platform_device *pdev)
{
	struct dsi_data *dsi = platform_get_drvdata(pdev);

	component_del(&pdev->dev, &dsi_component_ops);

	dsi_uninit_output(dsi);

	mipi_dsi_host_unregister(&dsi->host);

	pm_runtime_disable(&pdev->dev);

	if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}
}

static __maybe_unused int dsi_runtime_suspend(struct device *dev)
{
	struct dsi_data *dsi = dev_get_drvdata(dev);

	dsi->is_enabled = false;
	/* ensure the irq handler sees the is_enabled value */
	smp_wmb();
	/* wait for current handler to finish before turning the DSI off */
	synchronize_irq(dsi->irq);

	return 0;
}

static __maybe_unused int dsi_runtime_resume(struct device *dev)
{
	struct dsi_data *dsi = dev_get_drvdata(dev);

	dsi->is_enabled = true;
	/* ensure the irq handler sees the is_enabled value */
	smp_wmb();

	return 0;
}

static const struct dev_pm_ops dsi_pm_ops = {
	SET_RUNTIME_PM_OPS(dsi_runtime_suspend, dsi_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};

struct platform_driver omap_dsihw_driver = {
	.probe = dsi_probe,
	.remove_new = dsi_remove,
	.driver = {
		.name = "omapdss_dsi",
		.pm = &dsi_pm_ops,
		.of_match_table = dsi_of_match,
		.suppress_bind_attrs = true,
	},
};
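
/*
 * omap_dsihw_driver is deliberately non-static: this driver is not
 * registered here but together with the other DSS submodule drivers.
 * A sketch of the expected registration (an assumption based on the
 * exported symbol; see the omapdrm DSS core, dss.c):
 *
 *	static struct platform_driver * const omap_dss_drivers[] = {
 *		&omap_dsshw_driver,
 *		&omap_dsihw_driver,
 *		...
 *	};
 *
 *	platform_register_drivers(omap_dss_drivers,
 *				  ARRAY_SIZE(omap_dss_drivers));
 */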