/*
 * Driver for msm7k serial device and console
 *
 * Copyright (C) 2007 Google, Inc.
 * Author: Robert Love <rlove@google.com>
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
# define SUPPORT_SYSRQ
#endif

#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "msm_serial.h"

#define UARTDM_BURST_SIZE	16	/* in bytes */
#define UARTDM_TX_AIGN(x)	((x) & ~0x3)	/* valid for > 1p3 */
#define UARTDM_TX_MAX		256	/* in bytes, valid for <= 1p3 */
#define UARTDM_RX_SIZE		(UART_XMIT_SIZE / 4)

enum {
        UARTDM_1P1 = 1,
        UARTDM_1P2,
        UARTDM_1P3,
        UARTDM_1P4,
};

struct msm_dma {
        struct dma_chan *chan;
        enum dma_data_direction dir;
        dma_addr_t phys;
        unsigned char *virt;
        dma_cookie_t cookie;
        u32 enable_bit;
        unsigned int count;
        struct dma_async_tx_descriptor *desc;
};

struct msm_port {
        struct uart_port uart;
        char name[16];
        struct clk *clk;
        struct clk *pclk;
        unsigned int imr;
        int is_uartdm;
        unsigned int old_snap_state;
        bool break_detected;
        struct msm_dma tx_dma;
        struct msm_dma rx_dma;
};

static void msm_handle_tx(struct uart_port *port);
static void msm_start_rx_dma(struct msm_port *msm_port);

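/*
 * Stop an in-flight DMA transfer on the port: terminate the dmaengine
 * channel, clear the UARTDM enable bit and unmap the staging buffer.
 */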
void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
{
        struct device *dev = port->dev;
        unsigned int mapped;
        u32 val;

        mapped = dma->count;
        dma->count = 0;

        dmaengine_terminate_all(dma->chan);

        /*
         * A DMA stall can occur if an enqueue and a flush command are issued
         * concurrently. For example, before changing the baud rate/protocol
         * configuration and sending a flush command to the ADM, disable the
         * UARTDM channel.
         * Note: do not reset the receiver here immediately, as doing a
         * disable/reset or reset/disable back to back is not recommended.
         */
        val = msm_read(port, UARTDM_DMEN);
        val &= ~dma->enable_bit;
        msm_write(port, val, UARTDM_DMEN);

        if (mapped)
                dma_unmap_single(dev, dma->phys, mapped, dma->dir);
}

static void msm_release_dma(struct msm_port *msm_port)
{
        struct msm_dma *dma;

        dma = &msm_port->tx_dma;
        if (dma->chan) {
                msm_stop_dma(&msm_port->uart, dma);
                dma_release_channel(dma->chan);
        }

        memset(dma, 0, sizeof(*dma));

        dma = &msm_port->rx_dma;
        if (dma->chan) {
                msm_stop_dma(&msm_port->uart, dma);
                dma_release_channel(dma->chan);
                kfree(dma->virt);
        }

        memset(dma, 0, sizeof(*dma));
}

static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
{
        struct device *dev = msm_port->uart.dev;
        struct dma_slave_config conf;
        struct msm_dma *dma;
        u32 crci = 0;
        int ret;

        dma = &msm_port->tx_dma;

        /* allocate DMA resources, if available */
        dma->chan = dma_request_slave_channel_reason(dev, "tx");
        if (IS_ERR(dma->chan))
                goto no_tx;

        of_property_read_u32(dev->of_node, "qcom,tx-crci", &crci);

        memset(&conf, 0, sizeof(conf));
        conf.direction = DMA_MEM_TO_DEV;
        conf.device_fc = true;
        conf.dst_addr = base + UARTDM_TF;
        conf.dst_maxburst = UARTDM_BURST_SIZE;
        conf.slave_id = crci;

        ret = dmaengine_slave_config(dma->chan, &conf);
        if (ret)
                goto rel_tx;

        dma->dir = DMA_TO_DEVICE;

        if (msm_port->is_uartdm < UARTDM_1P4)
                dma->enable_bit = UARTDM_DMEN_TX_DM_ENABLE;
        else
                dma->enable_bit = UARTDM_DMEN_TX_BAM_ENABLE;

        return;

rel_tx:
        dma_release_channel(dma->chan);
no_tx:
        memset(dma, 0, sizeof(*dma));
}

static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
{
        struct device *dev = msm_port->uart.dev;
        struct dma_slave_config conf;
        struct msm_dma *dma;
        u32 crci = 0;
        int ret;

        dma = &msm_port->rx_dma;

        /* allocate DMA resources, if available */
        dma->chan = dma_request_slave_channel_reason(dev, "rx");
        if (IS_ERR(dma->chan))
                goto no_rx;

        of_property_read_u32(dev->of_node, "qcom,rx-crci", &crci);

        dma->virt = kzalloc(UARTDM_RX_SIZE, GFP_KERNEL);
        if (!dma->virt)
                goto rel_rx;

        memset(&conf, 0, sizeof(conf));
        conf.direction = DMA_DEV_TO_MEM;
        conf.device_fc = true;
        conf.src_addr = base + UARTDM_RF;
        conf.src_maxburst = UARTDM_BURST_SIZE;
        conf.slave_id = crci;

        ret = dmaengine_slave_config(dma->chan, &conf);
        if (ret)
                goto err;

        dma->dir = DMA_FROM_DEVICE;

        if (msm_port->is_uartdm < UARTDM_1P4)
                dma->enable_bit = UARTDM_DMEN_RX_DM_ENABLE;
        else
                dma->enable_bit = UARTDM_DMEN_RX_BAM_ENABLE;

        return;
err:
        kfree(dma->virt);
rel_rx:
        dma_release_channel(dma->chan);
no_rx:
        memset(dma, 0, sizeof(*dma));
}

static inline void msm_wait_for_xmitr(struct uart_port *port)
{
        while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
                if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
                        break;
                udelay(1);
        }
        msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
}

static void msm_stop_tx(struct uart_port *port)
{
        struct msm_port *msm_port = UART_TO_MSM(port);

        msm_port->imr &= ~UART_IMR_TXLEV;
        msm_write(port, msm_port->imr, UART_IMR);
}

static void msm_start_tx(struct uart_port *port)
{
        struct msm_port *msm_port = UART_TO_MSM(port);
        struct msm_dma *dma = &msm_port->tx_dma;

        /* Already started in DMA mode */
        if (dma->count)
                return;

        msm_port->imr |= UART_IMR_TXLEV;
        msm_write(port, msm_port->imr, UART_IMR);
}

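/*
 * On UARTDM the length of the next Tx transfer must be written to NCF_TX
 * before any data is put into the Tx FIFO.
 */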
static void msm_reset_dm_count(struct uart_port *port, int count)
{
        msm_wait_for_xmitr(port);
        msm_write(port, count, UARTDM_NCF_TX);
        msm_read(port, UARTDM_NCF_TX);
}

static void msm_complete_tx_dma(void *args)
{
        struct msm_port *msm_port = args;
        struct uart_port *port = &msm_port->uart;
        struct circ_buf *xmit = &port->state->xmit;
        struct msm_dma *dma = &msm_port->tx_dma;
        struct dma_tx_state state;
        enum dma_status status;
        unsigned long flags;
        unsigned int count;
        u32 val;

        spin_lock_irqsave(&port->lock, flags);

        /* Already stopped */
        if (!dma->count)
                goto done;

        status = dmaengine_tx_status(dma->chan, dma->cookie, &state);

        dma_unmap_single(port->dev, dma->phys, dma->count, dma->dir);

        val = msm_read(port, UARTDM_DMEN);
        val &= ~dma->enable_bit;
        msm_write(port, val, UARTDM_DMEN);

        if (msm_port->is_uartdm > UARTDM_1P3) {
                msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
                msm_write(port, UART_CR_TX_ENABLE, UART_CR);
        }

        count = dma->count - state.residue;
        port->icount.tx += count;
        dma->count = 0;

        xmit->tail += count;
        xmit->tail &= UART_XMIT_SIZE - 1;

        /* Restore "Tx FIFO below watermark" interrupt */
        msm_port->imr |= UART_IMR_TXLEV;
        msm_write(port, msm_port->imr, UART_IMR);

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);

        msm_handle_tx(port);
done:
        spin_unlock_irqrestore(&port->lock, flags);
}

static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
{
        struct circ_buf *xmit = &msm_port->uart.state->xmit;
        struct uart_port *port = &msm_port->uart;
        struct msm_dma *dma = &msm_port->tx_dma;
        void *cpu_addr;
        int ret;
        u32 val;

        cpu_addr = &xmit->buf[xmit->tail];

        dma->phys = dma_map_single(port->dev, cpu_addr, count, dma->dir);
        ret = dma_mapping_error(port->dev, dma->phys);
        if (ret)
                return ret;

        dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
                                                count, DMA_MEM_TO_DEV,
                                                DMA_PREP_INTERRUPT |
                                                DMA_PREP_FENCE);
        if (!dma->desc) {
                ret = -EIO;
                goto unmap;
        }

        dma->desc->callback = msm_complete_tx_dma;
        dma->desc->callback_param = msm_port;

        dma->cookie = dmaengine_submit(dma->desc);
        ret = dma_submit_error(dma->cookie);
        if (ret)
                goto unmap;

        /*
         * The DMA completion reloads the Tx FIFO, so the "Tx FIFO below
         * watermark" interrupt is not needed; disable it.
         */
        msm_port->imr &= ~UART_IMR_TXLEV;
        msm_write(port, msm_port->imr, UART_IMR);

        dma->count = count;

        val = msm_read(port, UARTDM_DMEN);
        val |= dma->enable_bit;

        if (msm_port->is_uartdm < UARTDM_1P4)
                msm_write(port, val, UARTDM_DMEN);

        msm_reset_dm_count(port, count);

        if (msm_port->is_uartdm > UARTDM_1P3)
                msm_write(port, val, UARTDM_DMEN);

        dma_async_issue_pending(dma->chan);
        return 0;
unmap:
        dma_unmap_single(port->dev, dma->phys, count, dma->dir);
        return ret;
}

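/*
 * Rx DMA completion callback: runs when the transfer finishes or is
 * flushed from the interrupt path, pushes the received characters to the
 * tty layer and re-arms the next Rx DMA transfer.
 */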
static void msm_complete_rx_dma(void *args)
{
        struct msm_port *msm_port = args;
        struct uart_port *port = &msm_port->uart;
        struct tty_port *tport = &port->state->port;
        struct msm_dma *dma = &msm_port->rx_dma;
        int count = 0, i, sysrq;
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&port->lock, flags);

        /* Already stopped */
        if (!dma->count)
                goto done;

        val = msm_read(port, UARTDM_DMEN);
        val &= ~dma->enable_bit;
        msm_write(port, val, UARTDM_DMEN);

        /* Restore interrupts */
        msm_port->imr |= UART_IMR_RXLEV | UART_IMR_RXSTALE;
        msm_write(port, msm_port->imr, UART_IMR);

        if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
                port->icount.overrun++;
                tty_insert_flip_char(tport, 0, TTY_OVERRUN);
                msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
        }

        count = msm_read(port, UARTDM_RX_TOTAL_SNAP);

        port->icount.rx += count;

        dma->count = 0;

        dma_unmap_single(port->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);

        for (i = 0; i < count; i++) {
                char flag = TTY_NORMAL;

                if (msm_port->break_detected && dma->virt[i] == 0) {
                        port->icount.brk++;
                        flag = TTY_BREAK;
                        msm_port->break_detected = false;
                        if (uart_handle_break(port))
                                continue;
                }

                if (!(port->read_status_mask & UART_SR_RX_BREAK))
                        flag = TTY_NORMAL;

                spin_unlock_irqrestore(&port->lock, flags);
                sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
                spin_lock_irqsave(&port->lock, flags);
                if (!sysrq)
                        tty_insert_flip_char(tport, dma->virt[i], flag);
        }

        msm_start_rx_dma(msm_port);
done:
        spin_unlock_irqrestore(&port->lock, flags);

        if (count)
                tty_flip_buffer_push(tport);
}

static void msm_start_rx_dma(struct msm_port *msm_port)
{
        struct msm_dma *dma = &msm_port->rx_dma;
        struct uart_port *uart = &msm_port->uart;
        u32 val;
        int ret;

        if (!dma->chan)
                return;

        dma->phys = dma_map_single(uart->dev, dma->virt,
                                   UARTDM_RX_SIZE, dma->dir);
        ret = dma_mapping_error(uart->dev, dma->phys);
        if (ret)
                return;

        dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
                                                UARTDM_RX_SIZE, DMA_DEV_TO_MEM,
                                                DMA_PREP_INTERRUPT);
        if (!dma->desc)
                goto unmap;

        dma->desc->callback = msm_complete_rx_dma;
        dma->desc->callback_param = msm_port;

        dma->cookie = dmaengine_submit(dma->desc);
        ret = dma_submit_error(dma->cookie);
        if (ret)
                goto unmap;

        /*
         * DMA is used to off-load the FIFO, so the "Rx FIFO over watermark"
         * and "stale" interrupts are not needed; disable them.
         */
        msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);

        /*
         * When the DMA engine is ADM3 (implied by <= UARTDM v1.3), RXSTALE
         * is still needed to flush the input DMA FIFO to memory.
         */
        if (msm_port->is_uartdm < UARTDM_1P4)
                msm_port->imr |= UART_IMR_RXSTALE;

        msm_write(uart, msm_port->imr, UART_IMR);

        dma->count = UARTDM_RX_SIZE;

        dma_async_issue_pending(dma->chan);

        msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
        msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);

        val = msm_read(uart, UARTDM_DMEN);
        val |= dma->enable_bit;

        if (msm_port->is_uartdm < UARTDM_1P4)
                msm_write(uart, val, UARTDM_DMEN);

        msm_write(uart, UARTDM_RX_SIZE, UARTDM_DMRX);

        if (msm_port->is_uartdm > UARTDM_1P3)
                msm_write(uart, val, UARTDM_DMEN);

        return;
unmap:
        dma_unmap_single(uart->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
}

static void msm_stop_rx(struct uart_port *port)
{
        struct msm_port *msm_port = UART_TO_MSM(port);
        struct msm_dma *dma = &msm_port->rx_dma;

        msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
        msm_write(port, msm_port->imr, UART_IMR);

        if (dma->chan)
                msm_stop_dma(port, dma);
}

static void msm_enable_ms(struct uart_port *port)
{
        struct msm_port *msm_port = UART_TO_MSM(port);

        msm_port->imr |= UART_IMR_DELTA_CTS;
        msm_write(port, msm_port->imr, UART_IMR);
}

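/*
 * PIO Rx path for UARTDM controllers: characters are read from the FIFO
 * a 32-bit word (up to four characters) at a time; used when Rx DMA is
 * not available or not currently armed.
 */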
static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
{
        struct tty_port *tport = &port->state->port;
        unsigned int sr;
        int count = 0;
        struct msm_port *msm_port = UART_TO_MSM(port);

        if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
                port->icount.overrun++;
                tty_insert_flip_char(tport, 0, TTY_OVERRUN);
                msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
        }

        if (misr & UART_IMR_RXSTALE) {
                count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
                        msm_port->old_snap_state;
                msm_port->old_snap_state = 0;
        } else {
                count = 4 * (msm_read(port, UART_RFWR));
                msm_port->old_snap_state += count;
        }

        /* TODO: Precise error reporting */

        port->icount.rx += count;

        while (count > 0) {
                unsigned char buf[4];
                int sysrq, r_count, i;

                sr = msm_read(port, UART_SR);
                if ((sr & UART_SR_RX_READY) == 0) {
                        msm_port->old_snap_state -= count;
                        break;
                }

                ioread32_rep(port->membase + UARTDM_RF, buf, 1);
                r_count = min_t(int, count, sizeof(buf));

                for (i = 0; i < r_count; i++) {
                        char flag = TTY_NORMAL;

                        if (msm_port->break_detected && buf[i] == 0) {
                                port->icount.brk++;
                                flag = TTY_BREAK;
                                msm_port->break_detected = false;
                                if (uart_handle_break(port))
                                        continue;
                        }

                        if (!(port->read_status_mask & UART_SR_RX_BREAK))
                                flag = TTY_NORMAL;

                        spin_unlock(&port->lock);
                        sysrq = uart_handle_sysrq_char(port, buf[i]);
                        spin_lock(&port->lock);
                        if (!sysrq)
                                tty_insert_flip_char(tport, buf[i], flag);
                }
                count -= r_count;
        }

        spin_unlock(&port->lock);
        tty_flip_buffer_push(tport);
        spin_lock(&port->lock);

        if (misr & (UART_IMR_RXSTALE))
                msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
        msm_write(port, 0xFFFFFF, UARTDM_DMRX);
        msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);

        /* Try to use DMA */
        msm_start_rx_dma(msm_port);
}

static void msm_handle_rx(struct uart_port *port)
{
        struct tty_port *tport = &port->state->port;
        unsigned int sr;

        /*
         * Handle overrun. My understanding of the hardware is that overrun
         * is not tied to the RX buffer, so we handle the case out of band.
         */
        if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
                port->icount.overrun++;
                tty_insert_flip_char(tport, 0, TTY_OVERRUN);
                msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
        }

        /* and now the main RX loop */
        while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
                unsigned int c;
                char flag = TTY_NORMAL;
                int sysrq;

                c = msm_read(port, UART_RF);

                if (sr & UART_SR_RX_BREAK) {
                        port->icount.brk++;
                        if (uart_handle_break(port))
                                continue;
                } else if (sr & UART_SR_PAR_FRAME_ERR) {
                        port->icount.frame++;
                } else {
                        port->icount.rx++;
                }

                /* Mask conditions we're ignoring. */
                sr &= port->read_status_mask;

                if (sr & UART_SR_RX_BREAK)
                        flag = TTY_BREAK;
                else if (sr & UART_SR_PAR_FRAME_ERR)
                        flag = TTY_FRAME;

                spin_unlock(&port->lock);
                sysrq = uart_handle_sysrq_char(port, c);
                spin_lock(&port->lock);
                if (!sysrq)
                        tty_insert_flip_char(tport, c, flag);
        }

        spin_unlock(&port->lock);
        tty_flip_buffer_push(tport);
        spin_lock(&port->lock);
}

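/*
 * PIO Tx path: on UARTDM up to four characters are packed into each
 * 32-bit FIFO write; legacy UARTs take one character per write.
 */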
static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
{
        struct circ_buf *xmit = &port->state->xmit;
        struct msm_port *msm_port = UART_TO_MSM(port);
        unsigned int num_chars;
        unsigned int tf_pointer = 0;
        void __iomem *tf;

        if (msm_port->is_uartdm)
                tf = port->membase + UARTDM_TF;
        else
                tf = port->membase + UART_TF;

        if (tx_count && msm_port->is_uartdm)
                msm_reset_dm_count(port, tx_count);

        while (tf_pointer < tx_count) {
                int i;
                char buf[4] = { 0 };

                if (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
                        break;

                if (msm_port->is_uartdm)
                        num_chars = min(tx_count - tf_pointer,
                                        (unsigned int)sizeof(buf));
                else
                        num_chars = 1;

                for (i = 0; i < num_chars; i++) {
                        buf[i] = xmit->buf[xmit->tail + i];
                        port->icount.tx++;
                }

                iowrite32_rep(tf, buf, 1);
                xmit->tail = (xmit->tail + num_chars) & (UART_XMIT_SIZE - 1);
                tf_pointer += num_chars;
        }

        /* disable tx interrupts if nothing more to send */
        if (uart_circ_empty(xmit))
                msm_stop_tx(port);

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
}

static void msm_handle_tx(struct uart_port *port)
{
        struct msm_port *msm_port = UART_TO_MSM(port);
        struct circ_buf *xmit = &msm_port->uart.state->xmit;
        struct msm_dma *dma = &msm_port->tx_dma;
        unsigned int pio_count, dma_count, dma_min;
        void __iomem *tf;
        int err = 0;

        if (port->x_char) {
                if (msm_port->is_uartdm)
                        tf = port->membase + UARTDM_TF;
                else
                        tf = port->membase + UART_TF;

                if (msm_port->is_uartdm)
                        msm_reset_dm_count(port, 1);

                iowrite8_rep(tf, &port->x_char, 1);
                port->icount.tx++;
                port->x_char = 0;
                return;
        }

        if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
                msm_stop_tx(port);
                return;
        }

        pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
        dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

        dma_min = 1;    /* Always DMA */
        if (msm_port->is_uartdm > UARTDM_1P3) {
                dma_count = UARTDM_TX_AIGN(dma_count);
                dma_min = UARTDM_BURST_SIZE;
        } else {
                if (dma_count > UARTDM_TX_MAX)
                        dma_count = UARTDM_TX_MAX;
        }

        if (pio_count > port->fifosize)
                pio_count = port->fifosize;

        if (!dma->chan || dma_count < dma_min)
                msm_handle_tx_pio(port, pio_count);
        else
                err = msm_handle_tx_dma(msm_port, dma_count);

        if (err) /* fall back to PIO mode */
                msm_handle_tx_pio(port, pio_count);
}

static void msm_handle_delta_cts(struct uart_port *port)
{
        msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
        port->icount.cts++;
        wake_up_interruptible(&port->state->port.delta_msr_wait);
}

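/*
 * Top-level interrupt handler: interrupts are masked while the sources
 * latched in MISR (Rx break start, Rx level/stale, Tx level, delta CTS)
 * are serviced, then the saved IMR is restored.
 */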
static irqreturn_t msm_uart_irq(int irq, void *dev_id)
{
        struct uart_port *port = dev_id;
        struct msm_port *msm_port = UART_TO_MSM(port);
        struct msm_dma *dma = &msm_port->rx_dma;
        unsigned long flags;
        unsigned int misr;
        u32 val;

        spin_lock_irqsave(&port->lock, flags);
        misr = msm_read(port, UART_MISR);
        msm_write(port, 0, UART_IMR); /* disable interrupt */

        if (misr & UART_IMR_RXBREAK_START) {
                msm_port->break_detected = true;
                msm_write(port, UART_CR_CMD_RESET_RXBREAK_START, UART_CR);
        }

        if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
                if (dma->count) {
                        val = UART_CR_CMD_STALE_EVENT_DISABLE;
                        msm_write(port, val, UART_CR);
                        val = UART_CR_CMD_RESET_STALE_INT;
                        msm_write(port, val, UART_CR);
                        /*
                         * Flush DMA input fifo to memory, this will also
                         * trigger DMA RX completion
                         */
                        dmaengine_terminate_all(dma->chan);
                } else if (msm_port->is_uartdm) {
                        msm_handle_rx_dm(port, misr);
                } else {
                        msm_handle_rx(port);
                }
        }
        if (misr & UART_IMR_TXLEV)
                msm_handle_tx(port);
        if (misr & UART_IMR_DELTA_CTS)
                msm_handle_delta_cts(port);

        msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
        spin_unlock_irqrestore(&port->lock, flags);

        return IRQ_HANDLED;
}

static unsigned int msm_tx_empty(struct uart_port *port)
{
        return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
}

static unsigned int msm_get_mctrl(struct uart_port *port)
{
        return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
}

static void msm_reset(struct uart_port *port)
{
        struct msm_port *msm_port = UART_TO_MSM(port);

        /* reset everything */
        msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
        msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
        msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
        msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
        msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
        msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);

        /* Disable DM modes */
        if (msm_port->is_uartdm)
                msm_write(port, 0, UARTDM_DMEN);
}

static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
        unsigned int mr;

        mr = msm_read(port, UART_MR1);

        if (!(mctrl & TIOCM_RTS)) {
                mr &= ~UART_MR1_RX_RDY_CTL;
                msm_write(port, mr, UART_MR1);
                msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
        } else {
                mr |= UART_MR1_RX_RDY_CTL;
                msm_write(port, mr, UART_MR1);
        }
}

static void msm_break_ctl(struct uart_port *port, int break_ctl)
{
        if (break_ctl)
                msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
        else
                msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
}

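/*
 * Baud rate selection: walk the divisor table at successively higher
 * core clock rates (via clk_round_rate) and keep the divisor/rate pair
 * that gets closest to the requested baud rate.
 */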
struct msm_baud_map {
        u16 divisor;
        u8 code;
        u8 rxstale;
};

static const struct msm_baud_map *
msm_find_best_baud(struct uart_port *port, unsigned int baud,
                   unsigned long *rate)
{
        struct msm_port *msm_port = UART_TO_MSM(port);
        unsigned int divisor, result;
        unsigned long target, old, best_rate = 0, diff, best_diff = ULONG_MAX;
        const struct msm_baud_map *entry, *end, *best;
        static const struct msm_baud_map table[] = {
                {    1, 0xff, 31 },
                {    2, 0xee, 16 },
                {    3, 0xdd,  8 },
                {    4, 0xcc,  6 },
                {    6, 0xbb,  6 },
                {    8, 0xaa,  6 },
                {   12, 0x99,  6 },
                {   16, 0x88,  1 },
                {   24, 0x77,  1 },
                {   32, 0x66,  1 },
                {   48, 0x55,  1 },
                {   96, 0x44,  1 },
                {  192, 0x33,  1 },
                {  384, 0x22,  1 },
                {  768, 0x11,  1 },
                { 1536, 0x00,  1 },
        };

        best = table; /* Default to smallest divider */
        target = clk_round_rate(msm_port->clk, 16 * baud);
        divisor = DIV_ROUND_CLOSEST(target, 16 * baud);

        end = table + ARRAY_SIZE(table);
        entry = table;
        while (entry < end) {
                if (entry->divisor <= divisor) {
                        result = target / entry->divisor / 16;
                        diff = abs(result - baud);

                        /* Keep track of best entry */
                        if (diff < best_diff) {
                                best_diff = diff;
                                best = entry;
                                best_rate = target;
                        }

                        if (result == baud)
                                break;
                } else if (entry->divisor > divisor) {
                        old = target;
                        target = clk_round_rate(msm_port->clk, old + 1);
                        /*
                         * The rate didn't get any faster so we can't do
                         * better at dividing it down
                         */
                        if (target == old)
                                break;

                        /* Start the divisor search over at this new rate */
                        entry = table;
                        divisor = DIV_ROUND_CLOSEST(target, 16 * baud);
                        continue;
                }
                entry++;
        }

        *rate = best_rate;
        return best;
}

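/*
 * Program the divisor code, stale timeout and FIFO watermarks for the
 * requested baud rate, then reset and re-enable the transmitter and
 * receiver. The port lock is dropped while the clock rate is chosen
 * and set.
 */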
static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
                             unsigned long *saved_flags)
{
        unsigned int rxstale, watermark, mask;
        struct msm_port *msm_port = UART_TO_MSM(port);
        const struct msm_baud_map *entry;
        unsigned long flags, rate;

        flags = *saved_flags;
        spin_unlock_irqrestore(&port->lock, flags);

        entry = msm_find_best_baud(port, baud, &rate);
        clk_set_rate(msm_port->clk, rate);
        baud = rate / 16 / entry->divisor;

        spin_lock_irqsave(&port->lock, flags);
        *saved_flags = flags;
        port->uartclk = rate;

        msm_write(port, entry->code, UART_CSR);

        /* RX stale watermark */
        rxstale = entry->rxstale;
        watermark = UART_IPR_STALE_LSB & rxstale;
        if (msm_port->is_uartdm) {
                mask = UART_DM_IPR_STALE_TIMEOUT_MSB;
        } else {
                watermark |= UART_IPR_RXSTALE_LAST;
                mask = UART_IPR_STALE_TIMEOUT_MSB;
        }

        watermark |= mask & (rxstale << 2);

        msm_write(port, watermark, UART_IPR);

        /* set RX watermark */
        watermark = (port->fifosize * 3) / 4;
        msm_write(port, watermark, UART_RFWR);

        /* set TX watermark */
        msm_write(port, 10, UART_TFWR);

        msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
        msm_reset(port);

        /* Enable RX and TX */
        msm_write(port, UART_CR_TX_ENABLE | UART_CR_RX_ENABLE, UART_CR);

        /* turn on RX and CTS interrupts */
        msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
                        UART_IMR_CURRENT_CTS | UART_IMR_RXBREAK_START;

        msm_write(port, msm_port->imr, UART_IMR);

        if (msm_port->is_uartdm) {
                msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
                msm_write(port, 0xFFFFFF, UARTDM_DMRX);
                msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
        }

        return baud;
}

static void msm_init_clock(struct uart_port *port)
{
        struct msm_port *msm_port = UART_TO_MSM(port);

        clk_prepare_enable(msm_port->clk);
        clk_prepare_enable(msm_port->pclk);
        msm_serial_set_mnd_regs(port);
}

static int msm_startup(struct uart_port *port)
{
        struct msm_port *msm_port = UART_TO_MSM(port);
        unsigned int data, rfr_level, mask;
        int ret;

        snprintf(msm_port->name, sizeof(msm_port->name),
                 "msm_serial%d", port->line);

        ret = request_irq(port->irq, msm_uart_irq, IRQF_TRIGGER_HIGH,
                          msm_port->name, port);
        if (unlikely(ret))
                return ret;

        msm_init_clock(port);

        if (likely(port->fifosize > 12))
                rfr_level = port->fifosize - 12;
        else
                rfr_level = port->fifosize;

        /* set automatic RFR level */
        data = msm_read(port, UART_MR1);

        if (msm_port->is_uartdm)
                mask = UART_DM_MR1_AUTO_RFR_LEVEL1;
        else
                mask = UART_MR1_AUTO_RFR_LEVEL1;

        data &= ~mask;
        data &= ~UART_MR1_AUTO_RFR_LEVEL0;
        data |= mask & (rfr_level << 2);
        data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
        msm_write(port, data, UART_MR1);

        if (msm_port->is_uartdm) {
                msm_request_tx_dma(msm_port, msm_port->uart.mapbase);
                msm_request_rx_dma(msm_port, msm_port->uart.mapbase);
        }

        return 0;
}

static void msm_shutdown(struct uart_port *port)
{
        struct msm_port *msm_port = UART_TO_MSM(port);

        msm_port->imr = 0;
        msm_write(port, 0, UART_IMR); /* disable interrupts */

        if (msm_port->is_uartdm)
                msm_release_dma(msm_port);

        clk_disable_unprepare(msm_port->clk);

        free_irq(port->irq, port);
}

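/*
 * Termios handling: any in-flight Rx DMA is stopped first, then the baud
 * rate, parity, character size, stop bits and flow control are programmed,
 * and Rx DMA is restarted before the port lock is released.
 */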
static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
                            struct ktermios *old)
{
        struct msm_port *msm_port = UART_TO_MSM(port);
        struct msm_dma *dma = &msm_port->rx_dma;
        unsigned long flags;
        unsigned int baud, mr;

        spin_lock_irqsave(&port->lock, flags);

        if (dma->chan) /* Terminate if any */
                msm_stop_dma(port, dma);

        /* calculate and set baud rate */
        baud = uart_get_baud_rate(port, termios, old, 300, 4000000);
        baud = msm_set_baud_rate(port, baud, &flags);
        if (tty_termios_baud_rate(termios))
                tty_termios_encode_baud_rate(termios, baud, baud);

        /* calculate parity */
        mr = msm_read(port, UART_MR2);
        mr &= ~UART_MR2_PARITY_MODE;
        if (termios->c_cflag & PARENB) {
                if (termios->c_cflag & PARODD)
                        mr |= UART_MR2_PARITY_MODE_ODD;
                else if (termios->c_cflag & CMSPAR)
                        mr |= UART_MR2_PARITY_MODE_SPACE;
                else
                        mr |= UART_MR2_PARITY_MODE_EVEN;
        }

        /* calculate bits per char */
        mr &= ~UART_MR2_BITS_PER_CHAR;
        switch (termios->c_cflag & CSIZE) {
        case CS5:
                mr |= UART_MR2_BITS_PER_CHAR_5;
                break;
        case CS6:
                mr |= UART_MR2_BITS_PER_CHAR_6;
                break;
        case CS7:
                mr |= UART_MR2_BITS_PER_CHAR_7;
                break;
        case CS8:
        default:
                mr |= UART_MR2_BITS_PER_CHAR_8;
                break;
        }

        /* calculate stop bits */
        mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
        if (termios->c_cflag & CSTOPB)
                mr |= UART_MR2_STOP_BIT_LEN_TWO;
        else
                mr |= UART_MR2_STOP_BIT_LEN_ONE;

        /* set parity, bits per char, and stop bit */
        msm_write(port, mr, UART_MR2);

        /* calculate and set hardware flow control */
        mr = msm_read(port, UART_MR1);
        mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
        if (termios->c_cflag & CRTSCTS) {
                mr |= UART_MR1_CTS_CTL;
                mr |= UART_MR1_RX_RDY_CTL;
        }
        msm_write(port, mr, UART_MR1);

        /* Configure status bits to ignore based on termio flags. */
        port->read_status_mask = 0;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
        if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART_SR_RX_BREAK;

        uart_update_timeout(port, termios->c_cflag, baud);

        /* Try to use DMA */
        msm_start_rx_dma(msm_port);

        spin_unlock_irqrestore(&port->lock, flags);
}

static const char *msm_type(struct uart_port *port)
{
        return "MSM";
}

static void msm_release_port(struct uart_port *port)
{
        struct platform_device *pdev = to_platform_device(port->dev);
        struct resource *uart_resource;
        resource_size_t size;

        uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (unlikely(!uart_resource))
                return;
        size = resource_size(uart_resource);

        release_mem_region(port->mapbase, size);
        iounmap(port->membase);
        port->membase = NULL;
}

static int msm_request_port(struct uart_port *port)
{
        struct platform_device *pdev = to_platform_device(port->dev);
        struct resource *uart_resource;
        resource_size_t size;
        int ret;

        uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (unlikely(!uart_resource))
                return -ENXIO;

        size = resource_size(uart_resource);

        if (!request_mem_region(port->mapbase, size, "msm_serial"))
                return -EBUSY;

        port->membase = ioremap(port->mapbase, size);
        if (!port->membase) {
                ret = -EBUSY;
                goto fail_release_port;
        }

        return 0;

fail_release_port:
        release_mem_region(port->mapbase, size);
        return ret;
}

static void msm_config_port(struct uart_port *port, int flags)
{
        int ret;

        if (flags & UART_CONFIG_TYPE) {
                port->type = PORT_MSM;
                ret = msm_request_port(port);
                if (ret)
                        return;
        }
}

static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
{
        if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_MSM))
                return -EINVAL;
        if (unlikely(port->irq != ser->irq))
                return -EINVAL;
        return 0;
}

static void msm_power(struct uart_port *port, unsigned int state,
                      unsigned int oldstate)
{
        struct msm_port *msm_port = UART_TO_MSM(port);

        switch (state) {
        case 0:
                clk_prepare_enable(msm_port->clk);
                clk_prepare_enable(msm_port->pclk);
                break;
        case 3:
                clk_disable_unprepare(msm_port->clk);
                clk_disable_unprepare(msm_port->pclk);
                break;
        default:
                pr_err("msm_serial: Unknown PM state %d\n", state);
        }
}

#ifdef CONFIG_CONSOLE_POLL
static int msm_poll_get_char_single(struct uart_port *port)
{
        struct msm_port *msm_port = UART_TO_MSM(port);
        unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : UART_RF;

        if (!(msm_read(port, UART_SR) & UART_SR_RX_READY))
                return NO_POLL_CHAR;

        return msm_read(port, rf_reg) & 0xff;
}

static int msm_poll_get_char_dm(struct uart_port *port)
{
        int c;
        static u32 slop;
        static int count;
        unsigned char *sp = (unsigned char *)&slop;

        /* Check if a previous read had more than one char */
        if (count) {
                c = sp[sizeof(slop) - count];
                count--;
        /* Or if FIFO is empty */
        } else if (!(msm_read(port, UART_SR) & UART_SR_RX_READY)) {
                /*
                 * If RX packing buffer has less than a word, force stale to
                 * push contents into RX FIFO
                 */
                count = msm_read(port, UARTDM_RXFS);
                count = (count >> UARTDM_RXFS_BUF_SHIFT) & UARTDM_RXFS_BUF_MASK;
                if (count) {
                        msm_write(port, UART_CR_CMD_FORCE_STALE, UART_CR);
                        slop = msm_read(port, UARTDM_RF);
                        c = sp[0];
                        count--;
                        msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
                        msm_write(port, 0xFFFFFF, UARTDM_DMRX);
                        msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE,
                                  UART_CR);
                } else {
                        c = NO_POLL_CHAR;
                }
        /* FIFO has a word */
        } else {
                slop = msm_read(port, UARTDM_RF);
                c = sp[0];
                count = sizeof(slop) - 1;
        }

        return c;
}

static int msm_poll_get_char(struct uart_port *port)
{
        u32 imr;
        int c;
        struct msm_port *msm_port = UART_TO_MSM(port);

        /* Disable all interrupts */
        imr = msm_read(port, UART_IMR);
        msm_write(port, 0, UART_IMR);

        if (msm_port->is_uartdm)
                c = msm_poll_get_char_dm(port);
        else
                c = msm_poll_get_char_single(port);

        /* Enable interrupts */
        msm_write(port, imr, UART_IMR);

        return c;
}

static void msm_poll_put_char(struct uart_port *port, unsigned char c)
{
        u32 imr;
        struct msm_port *msm_port = UART_TO_MSM(port);

        /* Disable all interrupts */
        imr = msm_read(port, UART_IMR);
        msm_write(port, 0, UART_IMR);

        if (msm_port->is_uartdm)
                msm_reset_dm_count(port, 1);

        /* Wait until FIFO is empty */
        while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
                cpu_relax();

        /* Write a character */
        msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);

        /* Wait until FIFO is empty */
        while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
                cpu_relax();

        /* Enable interrupts */
        msm_write(port, imr, UART_IMR);
}
#endif

static struct uart_ops msm_uart_pops = {
        .tx_empty = msm_tx_empty,
        .set_mctrl = msm_set_mctrl,
        .get_mctrl = msm_get_mctrl,
        .stop_tx = msm_stop_tx,
        .start_tx = msm_start_tx,
        .stop_rx = msm_stop_rx,
        .enable_ms = msm_enable_ms,
        .break_ctl = msm_break_ctl,
        .startup = msm_startup,
        .shutdown = msm_shutdown,
        .set_termios = msm_set_termios,
        .type = msm_type,
        .release_port = msm_release_port,
        .request_port = msm_request_port,
        .config_port = msm_config_port,
        .verify_port = msm_verify_port,
        .pm = msm_power,
#ifdef CONFIG_CONSOLE_POLL
        .poll_get_char = msm_poll_get_char,
        .poll_put_char = msm_poll_put_char,
#endif
};

static struct msm_port msm_uart_ports[] = {
        {
                .uart = {
                        .iotype = UPIO_MEM,
                        .ops = &msm_uart_pops,
                        .flags = UPF_BOOT_AUTOCONF,
                        .fifosize = 64,
                        .line = 0,
                },
        },
        {
                .uart = {
                        .iotype = UPIO_MEM,
                        .ops = &msm_uart_pops,
                        .flags = UPF_BOOT_AUTOCONF,
                        .fifosize = 64,
                        .line = 1,
                },
        },
        {
                .uart = {
                        .iotype = UPIO_MEM,
                        .ops = &msm_uart_pops,
                        .flags = UPF_BOOT_AUTOCONF,
                        .fifosize = 64,
                        .line = 2,
                },
        },
};

#define UART_NR ARRAY_SIZE(msm_uart_ports)

static inline struct uart_port *msm_get_port_from_line(unsigned int line)
{
        return &msm_uart_ports[line].uart;
}

#ifdef CONFIG_SERIAL_MSM_CONSOLE
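/*
 * Console/earlycon output helper: expands '\n' to "\r\n" and, on UARTDM,
 * packs up to four characters into each 32-bit FIFO write.
 */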
static void __msm_console_write(struct uart_port *port, const char *s,
                                unsigned int count, bool is_uartdm)
{
        int i;
        int num_newlines = 0;
        bool replaced = false;
        void __iomem *tf;

        if (is_uartdm)
                tf = port->membase + UARTDM_TF;
        else
                tf = port->membase + UART_TF;

        /* Account for newlines that will get a carriage return added */
        for (i = 0; i < count; i++)
                if (s[i] == '\n')
                        num_newlines++;
        count += num_newlines;

        spin_lock(&port->lock);
        if (is_uartdm)
                msm_reset_dm_count(port, count);

        i = 0;
        while (i < count) {
                int j;
                unsigned int num_chars;
                char buf[4] = { 0 };

                if (is_uartdm)
                        num_chars = min(count - i, (unsigned int)sizeof(buf));
                else
                        num_chars = 1;

                for (j = 0; j < num_chars; j++) {
                        char c = *s;

                        if (c == '\n' && !replaced) {
                                buf[j] = '\r';
                                j++;
                                replaced = true;
                        }
                        if (j < num_chars) {
                                buf[j] = c;
                                s++;
                                replaced = false;
                        }
                }

                while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
                        cpu_relax();

                iowrite32_rep(tf, buf, 1);
                i += num_chars;
        }
        spin_unlock(&port->lock);
}

static void msm_console_write(struct console *co, const char *s,
                              unsigned int count)
{
        struct uart_port *port;
        struct msm_port *msm_port;

        BUG_ON(co->index < 0 || co->index >= UART_NR);

        port = msm_get_port_from_line(co->index);
        msm_port = UART_TO_MSM(port);

        __msm_console_write(port, s, count, msm_port->is_uartdm);
}

static int __init msm_console_setup(struct console *co, char *options)
{
        struct uart_port *port;
        int baud = 115200;
        int bits = 8;
        int parity = 'n';
        int flow = 'n';

        if (unlikely(co->index >= UART_NR || co->index < 0))
                return -ENXIO;

        port = msm_get_port_from_line(co->index);

        if (unlikely(!port->membase))
                return -ENXIO;

        msm_init_clock(port);

        if (options)
                uart_parse_options(options, &baud, &parity, &bits, &flow);

        pr_info("msm_serial: console setup on port #%d\n", port->line);

        return uart_set_options(port, co, baud, parity, bits, flow);
}

static void
msm_serial_early_write(struct console *con, const char *s, unsigned n)
{
        struct earlycon_device *dev = con->data;

        __msm_console_write(&dev->port, s, n, false);
}

static int __init
msm_serial_early_console_setup(struct earlycon_device *device, const char *opt)
{
        if (!device->port.membase)
                return -ENODEV;

        device->con->write = msm_serial_early_write;
        return 0;
}
OF_EARLYCON_DECLARE(msm_serial, "qcom,msm-uart",
                    msm_serial_early_console_setup);

static void
msm_serial_early_write_dm(struct console *con, const char *s, unsigned n)
{
        struct earlycon_device *dev = con->data;

        __msm_console_write(&dev->port, s, n, true);
}

static int __init
msm_serial_early_console_setup_dm(struct earlycon_device *device,
                                  const char *opt)
{
        if (!device->port.membase)
                return -ENODEV;

        device->con->write = msm_serial_early_write_dm;
        return 0;
}
OF_EARLYCON_DECLARE(msm_serial_dm, "qcom,msm-uartdm",
                    msm_serial_early_console_setup_dm);

static struct uart_driver msm_uart_driver;

static struct console msm_console = {
        .name = "ttyMSM",
        .write = msm_console_write,
        .device = uart_console_device,
        .setup = msm_console_setup,
        .flags = CON_PRINTBUFFER,
        .index = -1,
        .data = &msm_uart_driver,
};

#define MSM_CONSOLE (&msm_console)

#else
#define MSM_CONSOLE NULL
#endif

static struct uart_driver msm_uart_driver = {
        .owner = THIS_MODULE,
        .driver_name = "msm_serial",
        .dev_name = "ttyMSM",
        .nr = UART_NR,
        .cons = MSM_CONSOLE,
};

static atomic_t msm_uart_next_id = ATOMIC_INIT(0);

static const struct of_device_id msm_uartdm_table[] = {
        { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
        { .compatible = "qcom,msm-uartdm-v1.2", .data = (void *)UARTDM_1P2 },
        { .compatible = "qcom,msm-uartdm-v1.3", .data = (void *)UARTDM_1P3 },
        { .compatible = "qcom,msm-uartdm-v1.4", .data = (void *)UARTDM_1P4 },
        { }
};

static int msm_serial_probe(struct platform_device *pdev)
{
        struct msm_port *msm_port;
        struct resource *resource;
        struct uart_port *port;
        const struct of_device_id *id;
        int irq, line;

        if (pdev->dev.of_node)
                line = of_alias_get_id(pdev->dev.of_node, "serial");
        else
                line = pdev->id;

        if (line < 0)
                line = atomic_inc_return(&msm_uart_next_id) - 1;

        if (unlikely(line < 0 || line >= UART_NR))
                return -ENXIO;

        dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line);

        port = msm_get_port_from_line(line);
        port->dev = &pdev->dev;
        msm_port = UART_TO_MSM(port);

        id = of_match_device(msm_uartdm_table, &pdev->dev);
        if (id)
                msm_port->is_uartdm = (unsigned long)id->data;
        else
                msm_port->is_uartdm = 0;

        msm_port->clk = devm_clk_get(&pdev->dev, "core");
        if (IS_ERR(msm_port->clk))
                return PTR_ERR(msm_port->clk);

        if (msm_port->is_uartdm) {
                msm_port->pclk = devm_clk_get(&pdev->dev, "iface");
                if (IS_ERR(msm_port->pclk))
                        return PTR_ERR(msm_port->pclk);
        }

        port->uartclk = clk_get_rate(msm_port->clk);
        dev_info(&pdev->dev, "uartclk = %d\n", port->uartclk);

        resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (unlikely(!resource))
                return -ENXIO;
        port->mapbase = resource->start;

        irq = platform_get_irq(pdev, 0);
        if (unlikely(irq < 0))
                return -ENXIO;
        port->irq = irq;

        platform_set_drvdata(pdev, port);

        return uart_add_one_port(&msm_uart_driver, port);
}

static int msm_serial_remove(struct platform_device *pdev)
{
        struct uart_port *port = platform_get_drvdata(pdev);

        uart_remove_one_port(&msm_uart_driver, port);

        return 0;
}

static const struct of_device_id msm_match_table[] = {
        { .compatible = "qcom,msm-uart" },
        { .compatible = "qcom,msm-uartdm" },
        {}
};

static struct platform_driver msm_platform_driver = {
        .remove = msm_serial_remove,
        .probe = msm_serial_probe,
        .driver = {
                .name = "msm_serial",
                .of_match_table = msm_match_table,
        },
};

static int __init msm_serial_init(void)
{
        int ret;

        ret = uart_register_driver(&msm_uart_driver);
        if (unlikely(ret))
                return ret;

        ret = platform_driver_register(&msm_platform_driver);
        if (unlikely(ret))
                uart_unregister_driver(&msm_uart_driver);

        pr_info("msm_serial: driver initialized\n");

        return ret;
}

static void __exit msm_serial_exit(void)
{
        platform_driver_unregister(&msm_platform_driver);
        uart_unregister_driver(&msm_uart_driver);
}

module_init(msm_serial_init);
module_exit(msm_serial_exit);

MODULE_AUTHOR("Robert Love <rlove@google.com>");
MODULE_DESCRIPTION("Driver for msm7x serial device");
MODULE_LICENSE("GPL");