1 /* 2 * Driver for AMBA serial ports 3 * 4 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 5 * 6 * Copyright 1999 ARM Limited 7 * Copyright (C) 2000 Deep Blue Solutions Ltd. 8 * Copyright (C) 2010 ST-Ericsson SA 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 * 24 * This is a generic driver for ARM AMBA-type serial ports. They 25 * have a lot of 16550-like features, but are not register compatible. 26 * Note that although they do have CTS, DCD and DSR inputs, they do 27 * not have an RI input, nor do they have DTR or RTS outputs. If 28 * required, these have to be supplied via some other means (eg, GPIO) 29 * and hooked into this driver. 30 */ 31 32 33 #if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 34 #define SUPPORT_SYSRQ 35 #endif 36 37 #include <linux/module.h> 38 #include <linux/ioport.h> 39 #include <linux/init.h> 40 #include <linux/console.h> 41 #include <linux/sysrq.h> 42 #include <linux/device.h> 43 #include <linux/tty.h> 44 #include <linux/tty_flip.h> 45 #include <linux/serial_core.h> 46 #include <linux/serial.h> 47 #include <linux/amba/bus.h> 48 #include <linux/amba/serial.h> 49 #include <linux/clk.h> 50 #include <linux/slab.h> 51 #include <linux/dmaengine.h> 52 #include <linux/dma-mapping.h> 53 #include <linux/scatterlist.h> 54 #include <linux/delay.h> 55 #include <linux/types.h> 56 #include <linux/of.h> 57 #include <linux/of_device.h> 58 #include <linux/pinctrl/consumer.h> 59 #include <linux/sizes.h> 60 #include <linux/io.h> 61 62 #define UART_NR 14 63 64 #define SERIAL_AMBA_MAJOR 204 65 #define SERIAL_AMBA_MINOR 64 66 #define SERIAL_AMBA_NR UART_NR 67 68 #define AMBA_ISR_PASS_LIMIT 256 69 70 #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE) 71 #define UART_DUMMY_DR_RX (1 << 16) 72 73 /* There is by now at least one vendor with differing details, so handle it */ 74 struct vendor_data { 75 unsigned int ifls; 76 unsigned int fifosize; 77 unsigned int lcrh_tx; 78 unsigned int lcrh_rx; 79 bool oversampling; 80 bool dma_threshold; 81 bool cts_event_workaround; 82 }; 83 84 static struct vendor_data vendor_arm = { 85 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8, 86 .fifosize = 16, 87 .lcrh_tx = UART011_LCRH, 88 .lcrh_rx = UART011_LCRH, 89 .oversampling = false, 90 .dma_threshold = false, 91 .cts_event_workaround = false, 92 }; 93 94 static struct vendor_data vendor_st = { 95 .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF, 96 .fifosize = 64, 97 .lcrh_tx = ST_UART011_LCRH_TX, 98 .lcrh_rx = ST_UART011_LCRH_RX, 99 .oversampling = true, 100 .dma_threshold = true, 101 .cts_event_workaround = true, 102 }; 103 104 static struct uart_amba_port *amba_ports[UART_NR]; 105 106 /* Deals with DMA transactions */ 107 108 struct pl011_sgbuf { 109 struct scatterlist sg; 110 char *buf; 111 }; 112 113 struct pl011_dmarx_data { 114 struct dma_chan *chan; 115 struct completion complete; 116 bool use_buf_b; 117 struct pl011_sgbuf sgbuf_a; 118 struct pl011_sgbuf sgbuf_b; 119 dma_cookie_t cookie; 120 bool running; 121 struct timer_list timer; 122 unsigned int last_residue; 123 unsigned long last_jiffies; 124 bool auto_poll_rate; 125 unsigned int poll_rate; 126 unsigned int poll_timeout; 127 }; 128 129 struct pl011_dmatx_data { 130 struct dma_chan *chan; 131 struct scatterlist sg; 132 char *buf; 133 bool queued; 134 }; 135 136 /* 137 * We wrap our port structure around the generic uart_port. 138 */ 139 struct uart_amba_port { 140 struct uart_port port; 141 struct clk *clk; 142 /* Two optional pin states - default & sleep */ 143 struct pinctrl *pinctrl; 144 struct pinctrl_state *pins_default; 145 struct pinctrl_state *pins_sleep; 146 const struct vendor_data *vendor; 147 unsigned int dmacr; /* dma control reg */ 148 unsigned int im; /* interrupt mask */ 149 unsigned int old_status; 150 unsigned int fifosize; /* vendor-specific */ 151 unsigned int lcrh_tx; /* vendor-specific */ 152 unsigned int lcrh_rx; /* vendor-specific */ 153 unsigned int old_cr; /* state during shutdown */ 154 bool autorts; 155 char type[12]; 156 #ifdef CONFIG_DMA_ENGINE 157 /* DMA stuff */ 158 bool using_tx_dma; 159 bool using_rx_dma; 160 struct pl011_dmarx_data dmarx; 161 struct pl011_dmatx_data dmatx; 162 #endif 163 }; 164 165 /* 166 * Reads up to 256 characters from the FIFO or until it's empty and 167 * inserts them into the TTY layer. Returns the number of characters 168 * read from the FIFO. 169 */ 170 static int pl011_fifo_to_tty(struct uart_amba_port *uap) 171 { 172 u16 status, ch; 173 unsigned int flag, max_count = 256; 174 int fifotaken = 0; 175 176 while (max_count--) { 177 status = readw(uap->port.membase + UART01x_FR); 178 if (status & UART01x_FR_RXFE) 179 break; 180 181 /* Take chars from the FIFO and update status */ 182 ch = readw(uap->port.membase + UART01x_DR) | 183 UART_DUMMY_DR_RX; 184 flag = TTY_NORMAL; 185 uap->port.icount.rx++; 186 fifotaken++; 187 188 if (unlikely(ch & UART_DR_ERROR)) { 189 if (ch & UART011_DR_BE) { 190 ch &= ~(UART011_DR_FE | UART011_DR_PE); 191 uap->port.icount.brk++; 192 if (uart_handle_break(&uap->port)) 193 continue; 194 } else if (ch & UART011_DR_PE) 195 uap->port.icount.parity++; 196 else if (ch & UART011_DR_FE) 197 uap->port.icount.frame++; 198 if (ch & UART011_DR_OE) 199 uap->port.icount.overrun++; 200 201 ch &= uap->port.read_status_mask; 202 203 if (ch & UART011_DR_BE) 204 flag = TTY_BREAK; 205 else if (ch & UART011_DR_PE) 206 flag = TTY_PARITY; 207 else if (ch & UART011_DR_FE) 208 flag = TTY_FRAME; 209 } 210 211 if (uart_handle_sysrq_char(&uap->port, ch & 255)) 212 continue; 213 214 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); 215 } 216 217 return fifotaken; 218 } 219 220 221 /* 222 * All the DMA operation mode stuff goes inside this ifdef. 223 * This assumes that you have a generic DMA device interface, 224 * no custom DMA interfaces are supported. 225 */ 226 #ifdef CONFIG_DMA_ENGINE 227 228 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE 229 230 static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg, 231 enum dma_data_direction dir) 232 { 233 dma_addr_t dma_addr; 234 235 sg->buf = dma_alloc_coherent(chan->device->dev, 236 PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL); 237 if (!sg->buf) 238 return -ENOMEM; 239 240 sg_init_table(&sg->sg, 1); 241 sg_set_page(&sg->sg, phys_to_page(dma_addr), 242 PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr)); 243 sg_dma_address(&sg->sg) = dma_addr; 244 245 return 0; 246 } 247 248 static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg, 249 enum dma_data_direction dir) 250 { 251 if (sg->buf) { 252 dma_free_coherent(chan->device->dev, 253 PL011_DMA_BUFFER_SIZE, sg->buf, 254 sg_dma_address(&sg->sg)); 255 } 256 } 257 258 static void pl011_dma_probe_initcall(struct uart_amba_port *uap) 259 { 260 /* DMA is the sole user of the platform data right now */ 261 struct amba_pl011_data *plat = uap->port.dev->platform_data; 262 struct dma_slave_config tx_conf = { 263 .dst_addr = uap->port.mapbase + UART01x_DR, 264 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 265 .direction = DMA_MEM_TO_DEV, 266 .dst_maxburst = uap->fifosize >> 1, 267 .device_fc = false, 268 }; 269 struct dma_chan *chan; 270 dma_cap_mask_t mask; 271 272 /* We need platform data */ 273 if (!plat || !plat->dma_filter) { 274 dev_info(uap->port.dev, "no DMA platform data\n"); 275 return; 276 } 277 278 /* Try to acquire a generic DMA engine slave TX channel */ 279 dma_cap_zero(mask); 280 dma_cap_set(DMA_SLAVE, mask); 281 282 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param); 283 if (!chan) { 284 dev_err(uap->port.dev, "no TX DMA channel!\n"); 285 return; 286 } 287 288 dmaengine_slave_config(chan, &tx_conf); 289 uap->dmatx.chan = chan; 290 291 dev_info(uap->port.dev, "DMA channel TX %s\n", 292 dma_chan_name(uap->dmatx.chan)); 293 294 /* Optionally make use of an RX channel as well */ 295 if (plat->dma_rx_param) { 296 struct dma_slave_config rx_conf = { 297 .src_addr = uap->port.mapbase + UART01x_DR, 298 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 299 .direction = DMA_DEV_TO_MEM, 300 .src_maxburst = uap->fifosize >> 1, 301 .device_fc = false, 302 }; 303 304 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); 305 if (!chan) { 306 dev_err(uap->port.dev, "no RX DMA channel!\n"); 307 return; 308 } 309 310 dmaengine_slave_config(chan, &rx_conf); 311 uap->dmarx.chan = chan; 312 313 if (plat->dma_rx_poll_enable) { 314 /* Set poll rate if specified. */ 315 if (plat->dma_rx_poll_rate) { 316 uap->dmarx.auto_poll_rate = false; 317 uap->dmarx.poll_rate = plat->dma_rx_poll_rate; 318 } else { 319 /* 320 * 100 ms defaults to poll rate if not 321 * specified. This will be adjusted with 322 * the baud rate at set_termios. 323 */ 324 uap->dmarx.auto_poll_rate = true; 325 uap->dmarx.poll_rate = 100; 326 } 327 /* 3 secs defaults poll_timeout if not specified. */ 328 if (plat->dma_rx_poll_timeout) 329 uap->dmarx.poll_timeout = 330 plat->dma_rx_poll_timeout; 331 else 332 uap->dmarx.poll_timeout = 3000; 333 } else 334 uap->dmarx.auto_poll_rate = false; 335 336 dev_info(uap->port.dev, "DMA channel RX %s\n", 337 dma_chan_name(uap->dmarx.chan)); 338 } 339 } 340 341 #ifndef MODULE 342 /* 343 * Stack up the UARTs and let the above initcall be done at device 344 * initcall time, because the serial driver is called as an arch 345 * initcall, and at this time the DMA subsystem is not yet registered. 346 * At this point the driver will switch over to using DMA where desired. 347 */ 348 struct dma_uap { 349 struct list_head node; 350 struct uart_amba_port *uap; 351 }; 352 353 static LIST_HEAD(pl011_dma_uarts); 354 355 static int __init pl011_dma_initcall(void) 356 { 357 struct list_head *node, *tmp; 358 359 list_for_each_safe(node, tmp, &pl011_dma_uarts) { 360 struct dma_uap *dmau = list_entry(node, struct dma_uap, node); 361 pl011_dma_probe_initcall(dmau->uap); 362 list_del(node); 363 kfree(dmau); 364 } 365 return 0; 366 } 367 368 device_initcall(pl011_dma_initcall); 369 370 static void pl011_dma_probe(struct uart_amba_port *uap) 371 { 372 struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL); 373 if (dmau) { 374 dmau->uap = uap; 375 list_add_tail(&dmau->node, &pl011_dma_uarts); 376 } 377 } 378 #else 379 static void pl011_dma_probe(struct uart_amba_port *uap) 380 { 381 pl011_dma_probe_initcall(uap); 382 } 383 #endif 384 385 static void pl011_dma_remove(struct uart_amba_port *uap) 386 { 387 /* TODO: remove the initcall if it has not yet executed */ 388 if (uap->dmatx.chan) 389 dma_release_channel(uap->dmatx.chan); 390 if (uap->dmarx.chan) 391 dma_release_channel(uap->dmarx.chan); 392 } 393 394 /* Forward declare this for the refill routine */ 395 static int pl011_dma_tx_refill(struct uart_amba_port *uap); 396 397 /* 398 * The current DMA TX buffer has been sent. 399 * Try to queue up another DMA buffer. 400 */ 401 static void pl011_dma_tx_callback(void *data) 402 { 403 struct uart_amba_port *uap = data; 404 struct pl011_dmatx_data *dmatx = &uap->dmatx; 405 unsigned long flags; 406 u16 dmacr; 407 408 spin_lock_irqsave(&uap->port.lock, flags); 409 if (uap->dmatx.queued) 410 dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1, 411 DMA_TO_DEVICE); 412 413 dmacr = uap->dmacr; 414 uap->dmacr = dmacr & ~UART011_TXDMAE; 415 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 416 417 /* 418 * If TX DMA was disabled, it means that we've stopped the DMA for 419 * some reason (eg, XOFF received, or we want to send an X-char.) 420 * 421 * Note: we need to be careful here of a potential race between DMA 422 * and the rest of the driver - if the driver disables TX DMA while 423 * a TX buffer completing, we must update the tx queued status to 424 * get further refills (hence we check dmacr). 425 */ 426 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) || 427 uart_circ_empty(&uap->port.state->xmit)) { 428 uap->dmatx.queued = false; 429 spin_unlock_irqrestore(&uap->port.lock, flags); 430 return; 431 } 432 433 if (pl011_dma_tx_refill(uap) <= 0) { 434 /* 435 * We didn't queue a DMA buffer for some reason, but we 436 * have data pending to be sent. Re-enable the TX IRQ. 437 */ 438 uap->im |= UART011_TXIM; 439 writew(uap->im, uap->port.membase + UART011_IMSC); 440 } 441 spin_unlock_irqrestore(&uap->port.lock, flags); 442 } 443 444 /* 445 * Try to refill the TX DMA buffer. 446 * Locking: called with port lock held and IRQs disabled. 447 * Returns: 448 * 1 if we queued up a TX DMA buffer. 449 * 0 if we didn't want to handle this by DMA 450 * <0 on error 451 */ 452 static int pl011_dma_tx_refill(struct uart_amba_port *uap) 453 { 454 struct pl011_dmatx_data *dmatx = &uap->dmatx; 455 struct dma_chan *chan = dmatx->chan; 456 struct dma_device *dma_dev = chan->device; 457 struct dma_async_tx_descriptor *desc; 458 struct circ_buf *xmit = &uap->port.state->xmit; 459 unsigned int count; 460 461 /* 462 * Try to avoid the overhead involved in using DMA if the 463 * transaction fits in the first half of the FIFO, by using 464 * the standard interrupt handling. This ensures that we 465 * issue a uart_write_wakeup() at the appropriate time. 466 */ 467 count = uart_circ_chars_pending(xmit); 468 if (count < (uap->fifosize >> 1)) { 469 uap->dmatx.queued = false; 470 return 0; 471 } 472 473 /* 474 * Bodge: don't send the last character by DMA, as this 475 * will prevent XON from notifying us to restart DMA. 476 */ 477 count -= 1; 478 479 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */ 480 if (count > PL011_DMA_BUFFER_SIZE) 481 count = PL011_DMA_BUFFER_SIZE; 482 483 if (xmit->tail < xmit->head) 484 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count); 485 else { 486 size_t first = UART_XMIT_SIZE - xmit->tail; 487 size_t second = xmit->head; 488 489 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first); 490 if (second) 491 memcpy(&dmatx->buf[first], &xmit->buf[0], second); 492 } 493 494 dmatx->sg.length = count; 495 496 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) { 497 uap->dmatx.queued = false; 498 dev_dbg(uap->port.dev, "unable to map TX DMA\n"); 499 return -EBUSY; 500 } 501 502 desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV, 503 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 504 if (!desc) { 505 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); 506 uap->dmatx.queued = false; 507 /* 508 * If DMA cannot be used right now, we complete this 509 * transaction via IRQ and let the TTY layer retry. 510 */ 511 dev_dbg(uap->port.dev, "TX DMA busy\n"); 512 return -EBUSY; 513 } 514 515 /* Some data to go along to the callback */ 516 desc->callback = pl011_dma_tx_callback; 517 desc->callback_param = uap; 518 519 /* All errors should happen at prepare time */ 520 dmaengine_submit(desc); 521 522 /* Fire the DMA transaction */ 523 dma_dev->device_issue_pending(chan); 524 525 uap->dmacr |= UART011_TXDMAE; 526 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 527 uap->dmatx.queued = true; 528 529 /* 530 * Now we know that DMA will fire, so advance the ring buffer 531 * with the stuff we just dispatched. 532 */ 533 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); 534 uap->port.icount.tx += count; 535 536 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 537 uart_write_wakeup(&uap->port); 538 539 return 1; 540 } 541 542 /* 543 * We received a transmit interrupt without a pending X-char but with 544 * pending characters. 545 * Locking: called with port lock held and IRQs disabled. 546 * Returns: 547 * false if we want to use PIO to transmit 548 * true if we queued a DMA buffer 549 */ 550 static bool pl011_dma_tx_irq(struct uart_amba_port *uap) 551 { 552 if (!uap->using_tx_dma) 553 return false; 554 555 /* 556 * If we already have a TX buffer queued, but received a 557 * TX interrupt, it will be because we've just sent an X-char. 558 * Ensure the TX DMA is enabled and the TX IRQ is disabled. 559 */ 560 if (uap->dmatx.queued) { 561 uap->dmacr |= UART011_TXDMAE; 562 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 563 uap->im &= ~UART011_TXIM; 564 writew(uap->im, uap->port.membase + UART011_IMSC); 565 return true; 566 } 567 568 /* 569 * We don't have a TX buffer queued, so try to queue one. 570 * If we successfully queued a buffer, mask the TX IRQ. 571 */ 572 if (pl011_dma_tx_refill(uap) > 0) { 573 uap->im &= ~UART011_TXIM; 574 writew(uap->im, uap->port.membase + UART011_IMSC); 575 return true; 576 } 577 return false; 578 } 579 580 /* 581 * Stop the DMA transmit (eg, due to received XOFF). 582 * Locking: called with port lock held and IRQs disabled. 583 */ 584 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap) 585 { 586 if (uap->dmatx.queued) { 587 uap->dmacr &= ~UART011_TXDMAE; 588 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 589 } 590 } 591 592 /* 593 * Try to start a DMA transmit, or in the case of an XON/OFF 594 * character queued for send, try to get that character out ASAP. 595 * Locking: called with port lock held and IRQs disabled. 596 * Returns: 597 * false if we want the TX IRQ to be enabled 598 * true if we have a buffer queued 599 */ 600 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) 601 { 602 u16 dmacr; 603 604 if (!uap->using_tx_dma) 605 return false; 606 607 if (!uap->port.x_char) { 608 /* no X-char, try to push chars out in DMA mode */ 609 bool ret = true; 610 611 if (!uap->dmatx.queued) { 612 if (pl011_dma_tx_refill(uap) > 0) { 613 uap->im &= ~UART011_TXIM; 614 ret = true; 615 } else { 616 uap->im |= UART011_TXIM; 617 ret = false; 618 } 619 writew(uap->im, uap->port.membase + UART011_IMSC); 620 } else if (!(uap->dmacr & UART011_TXDMAE)) { 621 uap->dmacr |= UART011_TXDMAE; 622 writew(uap->dmacr, 623 uap->port.membase + UART011_DMACR); 624 } 625 return ret; 626 } 627 628 /* 629 * We have an X-char to send. Disable DMA to prevent it loading 630 * the TX fifo, and then see if we can stuff it into the FIFO. 631 */ 632 dmacr = uap->dmacr; 633 uap->dmacr &= ~UART011_TXDMAE; 634 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 635 636 if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) { 637 /* 638 * No space in the FIFO, so enable the transmit interrupt 639 * so we know when there is space. Note that once we've 640 * loaded the character, we should just re-enable DMA. 641 */ 642 return false; 643 } 644 645 writew(uap->port.x_char, uap->port.membase + UART01x_DR); 646 uap->port.icount.tx++; 647 uap->port.x_char = 0; 648 649 /* Success - restore the DMA state */ 650 uap->dmacr = dmacr; 651 writew(dmacr, uap->port.membase + UART011_DMACR); 652 653 return true; 654 } 655 656 /* 657 * Flush the transmit buffer. 658 * Locking: called with port lock held and IRQs disabled. 659 */ 660 static void pl011_dma_flush_buffer(struct uart_port *port) 661 { 662 struct uart_amba_port *uap = (struct uart_amba_port *)port; 663 664 if (!uap->using_tx_dma) 665 return; 666 667 /* Avoid deadlock with the DMA engine callback */ 668 spin_unlock(&uap->port.lock); 669 dmaengine_terminate_all(uap->dmatx.chan); 670 spin_lock(&uap->port.lock); 671 if (uap->dmatx.queued) { 672 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, 673 DMA_TO_DEVICE); 674 uap->dmatx.queued = false; 675 uap->dmacr &= ~UART011_TXDMAE; 676 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 677 } 678 } 679 680 static void pl011_dma_rx_callback(void *data); 681 682 static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) 683 { 684 struct dma_chan *rxchan = uap->dmarx.chan; 685 struct pl011_dmarx_data *dmarx = &uap->dmarx; 686 struct dma_async_tx_descriptor *desc; 687 struct pl011_sgbuf *sgbuf; 688 689 if (!rxchan) 690 return -EIO; 691 692 /* Start the RX DMA job */ 693 sgbuf = uap->dmarx.use_buf_b ? 694 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; 695 desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1, 696 DMA_DEV_TO_MEM, 697 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 698 /* 699 * If the DMA engine is busy and cannot prepare a 700 * channel, no big deal, the driver will fall back 701 * to interrupt mode as a result of this error code. 702 */ 703 if (!desc) { 704 uap->dmarx.running = false; 705 dmaengine_terminate_all(rxchan); 706 return -EBUSY; 707 } 708 709 /* Some data to go along to the callback */ 710 desc->callback = pl011_dma_rx_callback; 711 desc->callback_param = uap; 712 dmarx->cookie = dmaengine_submit(desc); 713 dma_async_issue_pending(rxchan); 714 715 uap->dmacr |= UART011_RXDMAE; 716 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 717 uap->dmarx.running = true; 718 719 uap->im &= ~UART011_RXIM; 720 writew(uap->im, uap->port.membase + UART011_IMSC); 721 722 return 0; 723 } 724 725 /* 726 * This is called when either the DMA job is complete, or 727 * the FIFO timeout interrupt occurred. This must be called 728 * with the port spinlock uap->port.lock held. 729 */ 730 static void pl011_dma_rx_chars(struct uart_amba_port *uap, 731 u32 pending, bool use_buf_b, 732 bool readfifo) 733 { 734 struct tty_port *port = &uap->port.state->port; 735 struct pl011_sgbuf *sgbuf = use_buf_b ? 736 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; 737 int dma_count = 0; 738 u32 fifotaken = 0; /* only used for vdbg() */ 739 740 struct pl011_dmarx_data *dmarx = &uap->dmarx; 741 int dmataken = 0; 742 743 if (uap->dmarx.poll_rate) { 744 /* The data can be taken by polling */ 745 dmataken = sgbuf->sg.length - dmarx->last_residue; 746 /* Recalculate the pending size */ 747 if (pending >= dmataken) 748 pending -= dmataken; 749 } 750 751 /* Pick the remain data from the DMA */ 752 if (pending) { 753 754 /* 755 * First take all chars in the DMA pipe, then look in the FIFO. 756 * Note that tty_insert_flip_buf() tries to take as many chars 757 * as it can. 758 */ 759 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken, 760 pending); 761 762 uap->port.icount.rx += dma_count; 763 if (dma_count < pending) 764 dev_warn(uap->port.dev, 765 "couldn't insert all characters (TTY is full?)\n"); 766 } 767 768 /* Reset the last_residue for Rx DMA poll */ 769 if (uap->dmarx.poll_rate) 770 dmarx->last_residue = sgbuf->sg.length; 771 772 /* 773 * Only continue with trying to read the FIFO if all DMA chars have 774 * been taken first. 775 */ 776 if (dma_count == pending && readfifo) { 777 /* Clear any error flags */ 778 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS, 779 uap->port.membase + UART011_ICR); 780 781 /* 782 * If we read all the DMA'd characters, and we had an 783 * incomplete buffer, that could be due to an rx error, or 784 * maybe we just timed out. Read any pending chars and check 785 * the error status. 786 * 787 * Error conditions will only occur in the FIFO, these will 788 * trigger an immediate interrupt and stop the DMA job, so we 789 * will always find the error in the FIFO, never in the DMA 790 * buffer. 791 */ 792 fifotaken = pl011_fifo_to_tty(uap); 793 } 794 795 spin_unlock(&uap->port.lock); 796 dev_vdbg(uap->port.dev, 797 "Took %d chars from DMA buffer and %d chars from the FIFO\n", 798 dma_count, fifotaken); 799 tty_flip_buffer_push(port); 800 spin_lock(&uap->port.lock); 801 } 802 803 static void pl011_dma_rx_irq(struct uart_amba_port *uap) 804 { 805 struct pl011_dmarx_data *dmarx = &uap->dmarx; 806 struct dma_chan *rxchan = dmarx->chan; 807 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? 808 &dmarx->sgbuf_b : &dmarx->sgbuf_a; 809 size_t pending; 810 struct dma_tx_state state; 811 enum dma_status dmastat; 812 813 /* 814 * Pause the transfer so we can trust the current counter, 815 * do this before we pause the PL011 block, else we may 816 * overflow the FIFO. 817 */ 818 if (dmaengine_pause(rxchan)) 819 dev_err(uap->port.dev, "unable to pause DMA transfer\n"); 820 dmastat = rxchan->device->device_tx_status(rxchan, 821 dmarx->cookie, &state); 822 if (dmastat != DMA_PAUSED) 823 dev_err(uap->port.dev, "unable to pause DMA transfer\n"); 824 825 /* Disable RX DMA - incoming data will wait in the FIFO */ 826 uap->dmacr &= ~UART011_RXDMAE; 827 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 828 uap->dmarx.running = false; 829 830 pending = sgbuf->sg.length - state.residue; 831 BUG_ON(pending > PL011_DMA_BUFFER_SIZE); 832 /* Then we terminate the transfer - we now know our residue */ 833 dmaengine_terminate_all(rxchan); 834 835 /* 836 * This will take the chars we have so far and insert 837 * into the framework. 838 */ 839 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true); 840 841 /* Switch buffer & re-trigger DMA job */ 842 dmarx->use_buf_b = !dmarx->use_buf_b; 843 if (pl011_dma_rx_trigger_dma(uap)) { 844 dev_dbg(uap->port.dev, "could not retrigger RX DMA job " 845 "fall back to interrupt mode\n"); 846 uap->im |= UART011_RXIM; 847 writew(uap->im, uap->port.membase + UART011_IMSC); 848 } 849 } 850 851 static void pl011_dma_rx_callback(void *data) 852 { 853 struct uart_amba_port *uap = data; 854 struct pl011_dmarx_data *dmarx = &uap->dmarx; 855 struct dma_chan *rxchan = dmarx->chan; 856 bool lastbuf = dmarx->use_buf_b; 857 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? 858 &dmarx->sgbuf_b : &dmarx->sgbuf_a; 859 size_t pending; 860 struct dma_tx_state state; 861 int ret; 862 863 /* 864 * This completion interrupt occurs typically when the 865 * RX buffer is totally stuffed but no timeout has yet 866 * occurred. When that happens, we just want the RX 867 * routine to flush out the secondary DMA buffer while 868 * we immediately trigger the next DMA job. 869 */ 870 spin_lock_irq(&uap->port.lock); 871 /* 872 * Rx data can be taken by the UART interrupts during 873 * the DMA irq handler. So we check the residue here. 874 */ 875 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); 876 pending = sgbuf->sg.length - state.residue; 877 BUG_ON(pending > PL011_DMA_BUFFER_SIZE); 878 /* Then we terminate the transfer - we now know our residue */ 879 dmaengine_terminate_all(rxchan); 880 881 uap->dmarx.running = false; 882 dmarx->use_buf_b = !lastbuf; 883 ret = pl011_dma_rx_trigger_dma(uap); 884 885 pl011_dma_rx_chars(uap, pending, lastbuf, false); 886 spin_unlock_irq(&uap->port.lock); 887 /* 888 * Do this check after we picked the DMA chars so we don't 889 * get some IRQ immediately from RX. 890 */ 891 if (ret) { 892 dev_dbg(uap->port.dev, "could not retrigger RX DMA job " 893 "fall back to interrupt mode\n"); 894 uap->im |= UART011_RXIM; 895 writew(uap->im, uap->port.membase + UART011_IMSC); 896 } 897 } 898 899 /* 900 * Stop accepting received characters, when we're shutting down or 901 * suspending this port. 902 * Locking: called with port lock held and IRQs disabled. 903 */ 904 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) 905 { 906 /* FIXME. Just disable the DMA enable */ 907 uap->dmacr &= ~UART011_RXDMAE; 908 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 909 } 910 911 /* 912 * Timer handler for Rx DMA polling. 913 * Every polling, It checks the residue in the dma buffer and transfer 914 * data to the tty. Also, last_residue is updated for the next polling. 915 */ 916 static void pl011_dma_rx_poll(unsigned long args) 917 { 918 struct uart_amba_port *uap = (struct uart_amba_port *)args; 919 struct tty_port *port = &uap->port.state->port; 920 struct pl011_dmarx_data *dmarx = &uap->dmarx; 921 struct dma_chan *rxchan = uap->dmarx.chan; 922 unsigned long flags = 0; 923 unsigned int dmataken = 0; 924 unsigned int size = 0; 925 struct pl011_sgbuf *sgbuf; 926 int dma_count; 927 struct dma_tx_state state; 928 929 sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; 930 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); 931 if (likely(state.residue < dmarx->last_residue)) { 932 dmataken = sgbuf->sg.length - dmarx->last_residue; 933 size = dmarx->last_residue - state.residue; 934 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken, 935 size); 936 if (dma_count == size) 937 dmarx->last_residue = state.residue; 938 dmarx->last_jiffies = jiffies; 939 } 940 tty_flip_buffer_push(port); 941 942 /* 943 * If no data is received in poll_timeout, the driver will fall back 944 * to interrupt mode. We will retrigger DMA at the first interrupt. 945 */ 946 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies) 947 > uap->dmarx.poll_timeout) { 948 949 spin_lock_irqsave(&uap->port.lock, flags); 950 pl011_dma_rx_stop(uap); 951 spin_unlock_irqrestore(&uap->port.lock, flags); 952 953 uap->dmarx.running = false; 954 dmaengine_terminate_all(rxchan); 955 del_timer(&uap->dmarx.timer); 956 } else { 957 mod_timer(&uap->dmarx.timer, 958 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); 959 } 960 } 961 962 static void pl011_dma_startup(struct uart_amba_port *uap) 963 { 964 int ret; 965 966 if (!uap->dmatx.chan) 967 return; 968 969 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL); 970 if (!uap->dmatx.buf) { 971 dev_err(uap->port.dev, "no memory for DMA TX buffer\n"); 972 uap->port.fifosize = uap->fifosize; 973 return; 974 } 975 976 sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE); 977 978 /* The DMA buffer is now the FIFO the TTY subsystem can use */ 979 uap->port.fifosize = PL011_DMA_BUFFER_SIZE; 980 uap->using_tx_dma = true; 981 982 if (!uap->dmarx.chan) 983 goto skip_rx; 984 985 /* Allocate and map DMA RX buffers */ 986 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a, 987 DMA_FROM_DEVICE); 988 if (ret) { 989 dev_err(uap->port.dev, "failed to init DMA %s: %d\n", 990 "RX buffer A", ret); 991 goto skip_rx; 992 } 993 994 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b, 995 DMA_FROM_DEVICE); 996 if (ret) { 997 dev_err(uap->port.dev, "failed to init DMA %s: %d\n", 998 "RX buffer B", ret); 999 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, 1000 DMA_FROM_DEVICE); 1001 goto skip_rx; 1002 } 1003 1004 uap->using_rx_dma = true; 1005 1006 skip_rx: 1007 /* Turn on DMA error (RX/TX will be enabled on demand) */ 1008 uap->dmacr |= UART011_DMAONERR; 1009 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 1010 1011 /* 1012 * ST Micro variants has some specific dma burst threshold 1013 * compensation. Set this to 16 bytes, so burst will only 1014 * be issued above/below 16 bytes. 1015 */ 1016 if (uap->vendor->dma_threshold) 1017 writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16, 1018 uap->port.membase + ST_UART011_DMAWM); 1019 1020 if (uap->using_rx_dma) { 1021 if (pl011_dma_rx_trigger_dma(uap)) 1022 dev_dbg(uap->port.dev, "could not trigger initial " 1023 "RX DMA job, fall back to interrupt mode\n"); 1024 if (uap->dmarx.poll_rate) { 1025 init_timer(&(uap->dmarx.timer)); 1026 uap->dmarx.timer.function = pl011_dma_rx_poll; 1027 uap->dmarx.timer.data = (unsigned long)uap; 1028 mod_timer(&uap->dmarx.timer, 1029 jiffies + 1030 msecs_to_jiffies(uap->dmarx.poll_rate)); 1031 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; 1032 uap->dmarx.last_jiffies = jiffies; 1033 } 1034 } 1035 } 1036 1037 static void pl011_dma_shutdown(struct uart_amba_port *uap) 1038 { 1039 if (!(uap->using_tx_dma || uap->using_rx_dma)) 1040 return; 1041 1042 /* Disable RX and TX DMA */ 1043 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY) 1044 barrier(); 1045 1046 spin_lock_irq(&uap->port.lock); 1047 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE); 1048 writew(uap->dmacr, uap->port.membase + UART011_DMACR); 1049 spin_unlock_irq(&uap->port.lock); 1050 1051 if (uap->using_tx_dma) { 1052 /* In theory, this should already be done by pl011_dma_flush_buffer */ 1053 dmaengine_terminate_all(uap->dmatx.chan); 1054 if (uap->dmatx.queued) { 1055 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, 1056 DMA_TO_DEVICE); 1057 uap->dmatx.queued = false; 1058 } 1059 1060 kfree(uap->dmatx.buf); 1061 uap->using_tx_dma = false; 1062 } 1063 1064 if (uap->using_rx_dma) { 1065 dmaengine_terminate_all(uap->dmarx.chan); 1066 /* Clean up the RX DMA */ 1067 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE); 1068 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE); 1069 if (uap->dmarx.poll_rate) 1070 del_timer_sync(&uap->dmarx.timer); 1071 uap->using_rx_dma = false; 1072 } 1073 } 1074 1075 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) 1076 { 1077 return uap->using_rx_dma; 1078 } 1079 1080 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) 1081 { 1082 return uap->using_rx_dma && uap->dmarx.running; 1083 } 1084 1085 #else 1086 /* Blank functions if the DMA engine is not available */ 1087 static inline void pl011_dma_probe(struct uart_amba_port *uap) 1088 { 1089 } 1090 1091 static inline void pl011_dma_remove(struct uart_amba_port *uap) 1092 { 1093 } 1094 1095 static inline void pl011_dma_startup(struct uart_amba_port *uap) 1096 { 1097 } 1098 1099 static inline void pl011_dma_shutdown(struct uart_amba_port *uap) 1100 { 1101 } 1102 1103 static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap) 1104 { 1105 return false; 1106 } 1107 1108 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap) 1109 { 1110 } 1111 1112 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) 1113 { 1114 return false; 1115 } 1116 1117 static inline void pl011_dma_rx_irq(struct uart_amba_port *uap) 1118 { 1119 } 1120 1121 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) 1122 { 1123 } 1124 1125 static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) 1126 { 1127 return -EIO; 1128 } 1129 1130 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) 1131 { 1132 return false; 1133 } 1134 1135 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) 1136 { 1137 return false; 1138 } 1139 1140 #define pl011_dma_flush_buffer NULL 1141 #endif 1142 1143 static void pl011_stop_tx(struct uart_port *port) 1144 { 1145 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1146 1147 uap->im &= ~UART011_TXIM; 1148 writew(uap->im, uap->port.membase + UART011_IMSC); 1149 pl011_dma_tx_stop(uap); 1150 } 1151 1152 static void pl011_start_tx(struct uart_port *port) 1153 { 1154 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1155 1156 if (!pl011_dma_tx_start(uap)) { 1157 uap->im |= UART011_TXIM; 1158 writew(uap->im, uap->port.membase + UART011_IMSC); 1159 } 1160 } 1161 1162 static void pl011_stop_rx(struct uart_port *port) 1163 { 1164 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1165 1166 uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM| 1167 UART011_PEIM|UART011_BEIM|UART011_OEIM); 1168 writew(uap->im, uap->port.membase + UART011_IMSC); 1169 1170 pl011_dma_rx_stop(uap); 1171 } 1172 1173 static void pl011_enable_ms(struct uart_port *port) 1174 { 1175 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1176 1177 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM; 1178 writew(uap->im, uap->port.membase + UART011_IMSC); 1179 } 1180 1181 static void pl011_rx_chars(struct uart_amba_port *uap) 1182 { 1183 pl011_fifo_to_tty(uap); 1184 1185 spin_unlock(&uap->port.lock); 1186 tty_flip_buffer_push(&uap->port.state->port); 1187 /* 1188 * If we were temporarily out of DMA mode for a while, 1189 * attempt to switch back to DMA mode again. 1190 */ 1191 if (pl011_dma_rx_available(uap)) { 1192 if (pl011_dma_rx_trigger_dma(uap)) { 1193 dev_dbg(uap->port.dev, "could not trigger RX DMA job " 1194 "fall back to interrupt mode again\n"); 1195 uap->im |= UART011_RXIM; 1196 } else { 1197 uap->im &= ~UART011_RXIM; 1198 /* Start Rx DMA poll */ 1199 if (uap->dmarx.poll_rate) { 1200 uap->dmarx.last_jiffies = jiffies; 1201 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; 1202 mod_timer(&uap->dmarx.timer, 1203 jiffies + 1204 msecs_to_jiffies(uap->dmarx.poll_rate)); 1205 } 1206 } 1207 1208 writew(uap->im, uap->port.membase + UART011_IMSC); 1209 } 1210 spin_lock(&uap->port.lock); 1211 } 1212 1213 static void pl011_tx_chars(struct uart_amba_port *uap) 1214 { 1215 struct circ_buf *xmit = &uap->port.state->xmit; 1216 int count; 1217 1218 if (uap->port.x_char) { 1219 writew(uap->port.x_char, uap->port.membase + UART01x_DR); 1220 uap->port.icount.tx++; 1221 uap->port.x_char = 0; 1222 return; 1223 } 1224 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) { 1225 pl011_stop_tx(&uap->port); 1226 return; 1227 } 1228 1229 /* If we are using DMA mode, try to send some characters. */ 1230 if (pl011_dma_tx_irq(uap)) 1231 return; 1232 1233 count = uap->fifosize >> 1; 1234 do { 1235 writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR); 1236 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 1237 uap->port.icount.tx++; 1238 if (uart_circ_empty(xmit)) 1239 break; 1240 } while (--count > 0); 1241 1242 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1243 uart_write_wakeup(&uap->port); 1244 1245 if (uart_circ_empty(xmit)) 1246 pl011_stop_tx(&uap->port); 1247 } 1248 1249 static void pl011_modem_status(struct uart_amba_port *uap) 1250 { 1251 unsigned int status, delta; 1252 1253 status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY; 1254 1255 delta = status ^ uap->old_status; 1256 uap->old_status = status; 1257 1258 if (!delta) 1259 return; 1260 1261 if (delta & UART01x_FR_DCD) 1262 uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD); 1263 1264 if (delta & UART01x_FR_DSR) 1265 uap->port.icount.dsr++; 1266 1267 if (delta & UART01x_FR_CTS) 1268 uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS); 1269 1270 wake_up_interruptible(&uap->port.state->port.delta_msr_wait); 1271 } 1272 1273 static irqreturn_t pl011_int(int irq, void *dev_id) 1274 { 1275 struct uart_amba_port *uap = dev_id; 1276 unsigned long flags; 1277 unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT; 1278 int handled = 0; 1279 unsigned int dummy_read; 1280 1281 spin_lock_irqsave(&uap->port.lock, flags); 1282 status = readw(uap->port.membase + UART011_MIS); 1283 if (status) { 1284 do { 1285 if (uap->vendor->cts_event_workaround) { 1286 /* workaround to make sure that all bits are unlocked.. */ 1287 writew(0x00, uap->port.membase + UART011_ICR); 1288 1289 /* 1290 * WA: introduce 26ns(1 uart clk) delay before W1C; 1291 * single apb access will incur 2 pclk(133.12Mhz) delay, 1292 * so add 2 dummy reads 1293 */ 1294 dummy_read = readw(uap->port.membase + UART011_ICR); 1295 dummy_read = readw(uap->port.membase + UART011_ICR); 1296 } 1297 1298 writew(status & ~(UART011_TXIS|UART011_RTIS| 1299 UART011_RXIS), 1300 uap->port.membase + UART011_ICR); 1301 1302 if (status & (UART011_RTIS|UART011_RXIS)) { 1303 if (pl011_dma_rx_running(uap)) 1304 pl011_dma_rx_irq(uap); 1305 else 1306 pl011_rx_chars(uap); 1307 } 1308 if (status & (UART011_DSRMIS|UART011_DCDMIS| 1309 UART011_CTSMIS|UART011_RIMIS)) 1310 pl011_modem_status(uap); 1311 if (status & UART011_TXIS) 1312 pl011_tx_chars(uap); 1313 1314 if (pass_counter-- == 0) 1315 break; 1316 1317 status = readw(uap->port.membase + UART011_MIS); 1318 } while (status != 0); 1319 handled = 1; 1320 } 1321 1322 spin_unlock_irqrestore(&uap->port.lock, flags); 1323 1324 return IRQ_RETVAL(handled); 1325 } 1326 1327 static unsigned int pl011_tx_empty(struct uart_port *port) 1328 { 1329 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1330 unsigned int status = readw(uap->port.membase + UART01x_FR); 1331 return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT; 1332 } 1333 1334 static unsigned int pl011_get_mctrl(struct uart_port *port) 1335 { 1336 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1337 unsigned int result = 0; 1338 unsigned int status = readw(uap->port.membase + UART01x_FR); 1339 1340 #define TIOCMBIT(uartbit, tiocmbit) \ 1341 if (status & uartbit) \ 1342 result |= tiocmbit 1343 1344 TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR); 1345 TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR); 1346 TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS); 1347 TIOCMBIT(UART011_FR_RI, TIOCM_RNG); 1348 #undef TIOCMBIT 1349 return result; 1350 } 1351 1352 static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl) 1353 { 1354 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1355 unsigned int cr; 1356 1357 cr = readw(uap->port.membase + UART011_CR); 1358 1359 #define TIOCMBIT(tiocmbit, uartbit) \ 1360 if (mctrl & tiocmbit) \ 1361 cr |= uartbit; \ 1362 else \ 1363 cr &= ~uartbit 1364 1365 TIOCMBIT(TIOCM_RTS, UART011_CR_RTS); 1366 TIOCMBIT(TIOCM_DTR, UART011_CR_DTR); 1367 TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1); 1368 TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2); 1369 TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE); 1370 1371 if (uap->autorts) { 1372 /* We need to disable auto-RTS if we want to turn RTS off */ 1373 TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN); 1374 } 1375 #undef TIOCMBIT 1376 1377 writew(cr, uap->port.membase + UART011_CR); 1378 } 1379 1380 static void pl011_break_ctl(struct uart_port *port, int break_state) 1381 { 1382 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1383 unsigned long flags; 1384 unsigned int lcr_h; 1385 1386 spin_lock_irqsave(&uap->port.lock, flags); 1387 lcr_h = readw(uap->port.membase + uap->lcrh_tx); 1388 if (break_state == -1) 1389 lcr_h |= UART01x_LCRH_BRK; 1390 else 1391 lcr_h &= ~UART01x_LCRH_BRK; 1392 writew(lcr_h, uap->port.membase + uap->lcrh_tx); 1393 spin_unlock_irqrestore(&uap->port.lock, flags); 1394 } 1395 1396 #ifdef CONFIG_CONSOLE_POLL 1397 1398 static void pl011_quiesce_irqs(struct uart_port *port) 1399 { 1400 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1401 unsigned char __iomem *regs = uap->port.membase; 1402 1403 writew(readw(regs + UART011_MIS), regs + UART011_ICR); 1404 /* 1405 * There is no way to clear TXIM as this is "ready to transmit IRQ", so 1406 * we simply mask it. start_tx() will unmask it. 1407 * 1408 * Note we can race with start_tx(), and if the race happens, the 1409 * polling user might get another interrupt just after we clear it. 1410 * But it should be OK and can happen even w/o the race, e.g. 1411 * controller immediately got some new data and raised the IRQ. 1412 * 1413 * And whoever uses polling routines assumes that it manages the device 1414 * (including tx queue), so we're also fine with start_tx()'s caller 1415 * side. 1416 */ 1417 writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC); 1418 } 1419 1420 static int pl011_get_poll_char(struct uart_port *port) 1421 { 1422 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1423 unsigned int status; 1424 1425 /* 1426 * The caller might need IRQs lowered, e.g. if used with KDB NMI 1427 * debugger. 1428 */ 1429 pl011_quiesce_irqs(port); 1430 1431 status = readw(uap->port.membase + UART01x_FR); 1432 if (status & UART01x_FR_RXFE) 1433 return NO_POLL_CHAR; 1434 1435 return readw(uap->port.membase + UART01x_DR); 1436 } 1437 1438 static void pl011_put_poll_char(struct uart_port *port, 1439 unsigned char ch) 1440 { 1441 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1442 1443 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) 1444 barrier(); 1445 1446 writew(ch, uap->port.membase + UART01x_DR); 1447 } 1448 1449 #endif /* CONFIG_CONSOLE_POLL */ 1450 1451 static int pl011_hwinit(struct uart_port *port) 1452 { 1453 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1454 int retval; 1455 1456 /* Optionaly enable pins to be muxed in and configured */ 1457 if (!IS_ERR(uap->pins_default)) { 1458 retval = pinctrl_select_state(uap->pinctrl, uap->pins_default); 1459 if (retval) 1460 dev_err(port->dev, 1461 "could not set default pins\n"); 1462 } 1463 1464 /* 1465 * Try to enable the clock producer. 1466 */ 1467 retval = clk_prepare_enable(uap->clk); 1468 if (retval) 1469 goto out; 1470 1471 uap->port.uartclk = clk_get_rate(uap->clk); 1472 1473 /* Clear pending error and receive interrupts */ 1474 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS | 1475 UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR); 1476 1477 /* 1478 * Save interrupts enable mask, and enable RX interrupts in case if 1479 * the interrupt is used for NMI entry. 1480 */ 1481 uap->im = readw(uap->port.membase + UART011_IMSC); 1482 writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC); 1483 1484 if (uap->port.dev->platform_data) { 1485 struct amba_pl011_data *plat; 1486 1487 plat = uap->port.dev->platform_data; 1488 if (plat->init) 1489 plat->init(); 1490 } 1491 return 0; 1492 out: 1493 return retval; 1494 } 1495 1496 static int pl011_startup(struct uart_port *port) 1497 { 1498 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1499 unsigned int cr; 1500 int retval; 1501 1502 retval = pl011_hwinit(port); 1503 if (retval) 1504 goto clk_dis; 1505 1506 writew(uap->im, uap->port.membase + UART011_IMSC); 1507 1508 /* 1509 * Allocate the IRQ 1510 */ 1511 retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap); 1512 if (retval) 1513 goto clk_dis; 1514 1515 writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS); 1516 1517 /* 1518 * Provoke TX FIFO interrupt into asserting. 1519 */ 1520 cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE; 1521 writew(cr, uap->port.membase + UART011_CR); 1522 writew(0, uap->port.membase + UART011_FBRD); 1523 writew(1, uap->port.membase + UART011_IBRD); 1524 writew(0, uap->port.membase + uap->lcrh_rx); 1525 if (uap->lcrh_tx != uap->lcrh_rx) { 1526 int i; 1527 /* 1528 * Wait 10 PCLKs before writing LCRH_TX register, 1529 * to get this delay write read only register 10 times 1530 */ 1531 for (i = 0; i < 10; ++i) 1532 writew(0xff, uap->port.membase + UART011_MIS); 1533 writew(0, uap->port.membase + uap->lcrh_tx); 1534 } 1535 writew(0, uap->port.membase + UART01x_DR); 1536 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY) 1537 barrier(); 1538 1539 /* restore RTS and DTR */ 1540 cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR); 1541 cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; 1542 writew(cr, uap->port.membase + UART011_CR); 1543 1544 /* 1545 * initialise the old status of the modem signals 1546 */ 1547 uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY; 1548 1549 /* Startup DMA */ 1550 pl011_dma_startup(uap); 1551 1552 /* 1553 * Finally, enable interrupts, only timeouts when using DMA 1554 * if initial RX DMA job failed, start in interrupt mode 1555 * as well. 1556 */ 1557 spin_lock_irq(&uap->port.lock); 1558 /* Clear out any spuriously appearing RX interrupts */ 1559 writew(UART011_RTIS | UART011_RXIS, 1560 uap->port.membase + UART011_ICR); 1561 uap->im = UART011_RTIM; 1562 if (!pl011_dma_rx_running(uap)) 1563 uap->im |= UART011_RXIM; 1564 writew(uap->im, uap->port.membase + UART011_IMSC); 1565 spin_unlock_irq(&uap->port.lock); 1566 1567 return 0; 1568 1569 clk_dis: 1570 clk_disable_unprepare(uap->clk); 1571 return retval; 1572 } 1573 1574 static void pl011_shutdown_channel(struct uart_amba_port *uap, 1575 unsigned int lcrh) 1576 { 1577 unsigned long val; 1578 1579 val = readw(uap->port.membase + lcrh); 1580 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN); 1581 writew(val, uap->port.membase + lcrh); 1582 } 1583 1584 static void pl011_shutdown(struct uart_port *port) 1585 { 1586 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1587 unsigned int cr; 1588 int retval; 1589 1590 /* 1591 * disable all interrupts 1592 */ 1593 spin_lock_irq(&uap->port.lock); 1594 uap->im = 0; 1595 writew(uap->im, uap->port.membase + UART011_IMSC); 1596 writew(0xffff, uap->port.membase + UART011_ICR); 1597 spin_unlock_irq(&uap->port.lock); 1598 1599 pl011_dma_shutdown(uap); 1600 1601 /* 1602 * Free the interrupt 1603 */ 1604 free_irq(uap->port.irq, uap); 1605 1606 /* 1607 * disable the port 1608 * disable the port. It should not disable RTS and DTR. 1609 * Also RTS and DTR state should be preserved to restore 1610 * it during startup(). 1611 */ 1612 uap->autorts = false; 1613 cr = readw(uap->port.membase + UART011_CR); 1614 uap->old_cr = cr; 1615 cr &= UART011_CR_RTS | UART011_CR_DTR; 1616 cr |= UART01x_CR_UARTEN | UART011_CR_TXE; 1617 writew(cr, uap->port.membase + UART011_CR); 1618 1619 /* 1620 * disable break condition and fifos 1621 */ 1622 pl011_shutdown_channel(uap, uap->lcrh_rx); 1623 if (uap->lcrh_rx != uap->lcrh_tx) 1624 pl011_shutdown_channel(uap, uap->lcrh_tx); 1625 1626 /* 1627 * Shut down the clock producer 1628 */ 1629 clk_disable_unprepare(uap->clk); 1630 /* Optionally let pins go into sleep states */ 1631 if (!IS_ERR(uap->pins_sleep)) { 1632 retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep); 1633 if (retval) 1634 dev_err(port->dev, 1635 "could not set pins to sleep state\n"); 1636 } 1637 1638 1639 if (uap->port.dev->platform_data) { 1640 struct amba_pl011_data *plat; 1641 1642 plat = uap->port.dev->platform_data; 1643 if (plat->exit) 1644 plat->exit(); 1645 } 1646 1647 } 1648 1649 static void 1650 pl011_set_termios(struct uart_port *port, struct ktermios *termios, 1651 struct ktermios *old) 1652 { 1653 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1654 unsigned int lcr_h, old_cr; 1655 unsigned long flags; 1656 unsigned int baud, quot, clkdiv; 1657 1658 if (uap->vendor->oversampling) 1659 clkdiv = 8; 1660 else 1661 clkdiv = 16; 1662 1663 /* 1664 * Ask the core to calculate the divisor for us. 1665 */ 1666 baud = uart_get_baud_rate(port, termios, old, 0, 1667 port->uartclk / clkdiv); 1668 /* 1669 * Adjust RX DMA polling rate with baud rate if not specified. 1670 */ 1671 if (uap->dmarx.auto_poll_rate) 1672 uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud); 1673 1674 if (baud > port->uartclk/16) 1675 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); 1676 else 1677 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); 1678 1679 switch (termios->c_cflag & CSIZE) { 1680 case CS5: 1681 lcr_h = UART01x_LCRH_WLEN_5; 1682 break; 1683 case CS6: 1684 lcr_h = UART01x_LCRH_WLEN_6; 1685 break; 1686 case CS7: 1687 lcr_h = UART01x_LCRH_WLEN_7; 1688 break; 1689 default: // CS8 1690 lcr_h = UART01x_LCRH_WLEN_8; 1691 break; 1692 } 1693 if (termios->c_cflag & CSTOPB) 1694 lcr_h |= UART01x_LCRH_STP2; 1695 if (termios->c_cflag & PARENB) { 1696 lcr_h |= UART01x_LCRH_PEN; 1697 if (!(termios->c_cflag & PARODD)) 1698 lcr_h |= UART01x_LCRH_EPS; 1699 } 1700 if (uap->fifosize > 1) 1701 lcr_h |= UART01x_LCRH_FEN; 1702 1703 spin_lock_irqsave(&port->lock, flags); 1704 1705 /* 1706 * Update the per-port timeout. 1707 */ 1708 uart_update_timeout(port, termios->c_cflag, baud); 1709 1710 port->read_status_mask = UART011_DR_OE | 255; 1711 if (termios->c_iflag & INPCK) 1712 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE; 1713 if (termios->c_iflag & (BRKINT | PARMRK)) 1714 port->read_status_mask |= UART011_DR_BE; 1715 1716 /* 1717 * Characters to ignore 1718 */ 1719 port->ignore_status_mask = 0; 1720 if (termios->c_iflag & IGNPAR) 1721 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE; 1722 if (termios->c_iflag & IGNBRK) { 1723 port->ignore_status_mask |= UART011_DR_BE; 1724 /* 1725 * If we're ignoring parity and break indicators, 1726 * ignore overruns too (for real raw support). 1727 */ 1728 if (termios->c_iflag & IGNPAR) 1729 port->ignore_status_mask |= UART011_DR_OE; 1730 } 1731 1732 /* 1733 * Ignore all characters if CREAD is not set. 1734 */ 1735 if ((termios->c_cflag & CREAD) == 0) 1736 port->ignore_status_mask |= UART_DUMMY_DR_RX; 1737 1738 if (UART_ENABLE_MS(port, termios->c_cflag)) 1739 pl011_enable_ms(port); 1740 1741 /* first, disable everything */ 1742 old_cr = readw(port->membase + UART011_CR); 1743 writew(0, port->membase + UART011_CR); 1744 1745 if (termios->c_cflag & CRTSCTS) { 1746 if (old_cr & UART011_CR_RTS) 1747 old_cr |= UART011_CR_RTSEN; 1748 1749 old_cr |= UART011_CR_CTSEN; 1750 uap->autorts = true; 1751 } else { 1752 old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN); 1753 uap->autorts = false; 1754 } 1755 1756 if (uap->vendor->oversampling) { 1757 if (baud > port->uartclk / 16) 1758 old_cr |= ST_UART011_CR_OVSFACT; 1759 else 1760 old_cr &= ~ST_UART011_CR_OVSFACT; 1761 } 1762 1763 /* 1764 * Workaround for the ST Micro oversampling variants to 1765 * increase the bitrate slightly, by lowering the divisor, 1766 * to avoid delayed sampling of start bit at high speeds, 1767 * else we see data corruption. 1768 */ 1769 if (uap->vendor->oversampling) { 1770 if ((baud >= 3000000) && (baud < 3250000) && (quot > 1)) 1771 quot -= 1; 1772 else if ((baud > 3250000) && (quot > 2)) 1773 quot -= 2; 1774 } 1775 /* Set baud rate */ 1776 writew(quot & 0x3f, port->membase + UART011_FBRD); 1777 writew(quot >> 6, port->membase + UART011_IBRD); 1778 1779 /* 1780 * ----------v----------v----------v----------v----- 1781 * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER 1782 * UART011_FBRD & UART011_IBRD. 1783 * ----------^----------^----------^----------^----- 1784 */ 1785 writew(lcr_h, port->membase + uap->lcrh_rx); 1786 if (uap->lcrh_rx != uap->lcrh_tx) { 1787 int i; 1788 /* 1789 * Wait 10 PCLKs before writing LCRH_TX register, 1790 * to get this delay write read only register 10 times 1791 */ 1792 for (i = 0; i < 10; ++i) 1793 writew(0xff, uap->port.membase + UART011_MIS); 1794 writew(lcr_h, port->membase + uap->lcrh_tx); 1795 } 1796 writew(old_cr, port->membase + UART011_CR); 1797 1798 spin_unlock_irqrestore(&port->lock, flags); 1799 } 1800 1801 static const char *pl011_type(struct uart_port *port) 1802 { 1803 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1804 return uap->port.type == PORT_AMBA ? uap->type : NULL; 1805 } 1806 1807 /* 1808 * Release the memory region(s) being used by 'port' 1809 */ 1810 static void pl011_release_port(struct uart_port *port) 1811 { 1812 release_mem_region(port->mapbase, SZ_4K); 1813 } 1814 1815 /* 1816 * Request the memory region(s) being used by 'port' 1817 */ 1818 static int pl011_request_port(struct uart_port *port) 1819 { 1820 return request_mem_region(port->mapbase, SZ_4K, "uart-pl011") 1821 != NULL ? 0 : -EBUSY; 1822 } 1823 1824 /* 1825 * Configure/autoconfigure the port. 1826 */ 1827 static void pl011_config_port(struct uart_port *port, int flags) 1828 { 1829 if (flags & UART_CONFIG_TYPE) { 1830 port->type = PORT_AMBA; 1831 pl011_request_port(port); 1832 } 1833 } 1834 1835 /* 1836 * verify the new serial_struct (for TIOCSSERIAL). 1837 */ 1838 static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser) 1839 { 1840 int ret = 0; 1841 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA) 1842 ret = -EINVAL; 1843 if (ser->irq < 0 || ser->irq >= nr_irqs) 1844 ret = -EINVAL; 1845 if (ser->baud_base < 9600) 1846 ret = -EINVAL; 1847 return ret; 1848 } 1849 1850 static struct uart_ops amba_pl011_pops = { 1851 .tx_empty = pl011_tx_empty, 1852 .set_mctrl = pl011_set_mctrl, 1853 .get_mctrl = pl011_get_mctrl, 1854 .stop_tx = pl011_stop_tx, 1855 .start_tx = pl011_start_tx, 1856 .stop_rx = pl011_stop_rx, 1857 .enable_ms = pl011_enable_ms, 1858 .break_ctl = pl011_break_ctl, 1859 .startup = pl011_startup, 1860 .shutdown = pl011_shutdown, 1861 .flush_buffer = pl011_dma_flush_buffer, 1862 .set_termios = pl011_set_termios, 1863 .type = pl011_type, 1864 .release_port = pl011_release_port, 1865 .request_port = pl011_request_port, 1866 .config_port = pl011_config_port, 1867 .verify_port = pl011_verify_port, 1868 #ifdef CONFIG_CONSOLE_POLL 1869 .poll_init = pl011_hwinit, 1870 .poll_get_char = pl011_get_poll_char, 1871 .poll_put_char = pl011_put_poll_char, 1872 #endif 1873 }; 1874 1875 static struct uart_amba_port *amba_ports[UART_NR]; 1876 1877 #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE 1878 1879 static void pl011_console_putchar(struct uart_port *port, int ch) 1880 { 1881 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1882 1883 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) 1884 barrier(); 1885 writew(ch, uap->port.membase + UART01x_DR); 1886 } 1887 1888 static void 1889 pl011_console_write(struct console *co, const char *s, unsigned int count) 1890 { 1891 struct uart_amba_port *uap = amba_ports[co->index]; 1892 unsigned int status, old_cr, new_cr; 1893 unsigned long flags; 1894 int locked = 1; 1895 1896 clk_enable(uap->clk); 1897 1898 local_irq_save(flags); 1899 if (uap->port.sysrq) 1900 locked = 0; 1901 else if (oops_in_progress) 1902 locked = spin_trylock(&uap->port.lock); 1903 else 1904 spin_lock(&uap->port.lock); 1905 1906 /* 1907 * First save the CR then disable the interrupts 1908 */ 1909 old_cr = readw(uap->port.membase + UART011_CR); 1910 new_cr = old_cr & ~UART011_CR_CTSEN; 1911 new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE; 1912 writew(new_cr, uap->port.membase + UART011_CR); 1913 1914 uart_console_write(&uap->port, s, count, pl011_console_putchar); 1915 1916 /* 1917 * Finally, wait for transmitter to become empty 1918 * and restore the TCR 1919 */ 1920 do { 1921 status = readw(uap->port.membase + UART01x_FR); 1922 } while (status & UART01x_FR_BUSY); 1923 writew(old_cr, uap->port.membase + UART011_CR); 1924 1925 if (locked) 1926 spin_unlock(&uap->port.lock); 1927 local_irq_restore(flags); 1928 1929 clk_disable(uap->clk); 1930 } 1931 1932 static void __init 1933 pl011_console_get_options(struct uart_amba_port *uap, int *baud, 1934 int *parity, int *bits) 1935 { 1936 if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) { 1937 unsigned int lcr_h, ibrd, fbrd; 1938 1939 lcr_h = readw(uap->port.membase + uap->lcrh_tx); 1940 1941 *parity = 'n'; 1942 if (lcr_h & UART01x_LCRH_PEN) { 1943 if (lcr_h & UART01x_LCRH_EPS) 1944 *parity = 'e'; 1945 else 1946 *parity = 'o'; 1947 } 1948 1949 if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7) 1950 *bits = 7; 1951 else 1952 *bits = 8; 1953 1954 ibrd = readw(uap->port.membase + UART011_IBRD); 1955 fbrd = readw(uap->port.membase + UART011_FBRD); 1956 1957 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd); 1958 1959 if (uap->vendor->oversampling) { 1960 if (readw(uap->port.membase + UART011_CR) 1961 & ST_UART011_CR_OVSFACT) 1962 *baud *= 2; 1963 } 1964 } 1965 } 1966 1967 static int __init pl011_console_setup(struct console *co, char *options) 1968 { 1969 struct uart_amba_port *uap; 1970 int baud = 38400; 1971 int bits = 8; 1972 int parity = 'n'; 1973 int flow = 'n'; 1974 int ret; 1975 1976 /* 1977 * Check whether an invalid uart number has been specified, and 1978 * if so, search for the first available port that does have 1979 * console support. 1980 */ 1981 if (co->index >= UART_NR) 1982 co->index = 0; 1983 uap = amba_ports[co->index]; 1984 if (!uap) 1985 return -ENODEV; 1986 1987 /* Allow pins to be muxed in and configured */ 1988 if (!IS_ERR(uap->pins_default)) { 1989 ret = pinctrl_select_state(uap->pinctrl, uap->pins_default); 1990 if (ret) 1991 dev_err(uap->port.dev, 1992 "could not set default pins\n"); 1993 } 1994 1995 ret = clk_prepare(uap->clk); 1996 if (ret) 1997 return ret; 1998 1999 if (uap->port.dev->platform_data) { 2000 struct amba_pl011_data *plat; 2001 2002 plat = uap->port.dev->platform_data; 2003 if (plat->init) 2004 plat->init(); 2005 } 2006 2007 uap->port.uartclk = clk_get_rate(uap->clk); 2008 2009 if (options) 2010 uart_parse_options(options, &baud, &parity, &bits, &flow); 2011 else 2012 pl011_console_get_options(uap, &baud, &parity, &bits); 2013 2014 return uart_set_options(&uap->port, co, baud, parity, bits, flow); 2015 } 2016 2017 static struct uart_driver amba_reg; 2018 static struct console amba_console = { 2019 .name = "ttyAMA", 2020 .write = pl011_console_write, 2021 .device = uart_console_device, 2022 .setup = pl011_console_setup, 2023 .flags = CON_PRINTBUFFER, 2024 .index = -1, 2025 .data = &amba_reg, 2026 }; 2027 2028 #define AMBA_CONSOLE (&amba_console) 2029 #else 2030 #define AMBA_CONSOLE NULL 2031 #endif 2032 2033 static struct uart_driver amba_reg = { 2034 .owner = THIS_MODULE, 2035 .driver_name = "ttyAMA", 2036 .dev_name = "ttyAMA", 2037 .major = SERIAL_AMBA_MAJOR, 2038 .minor = SERIAL_AMBA_MINOR, 2039 .nr = UART_NR, 2040 .cons = AMBA_CONSOLE, 2041 }; 2042 2043 static int pl011_probe_dt_alias(int index, struct device *dev) 2044 { 2045 struct device_node *np; 2046 static bool seen_dev_with_alias = false; 2047 static bool seen_dev_without_alias = false; 2048 int ret = index; 2049 2050 if (!IS_ENABLED(CONFIG_OF)) 2051 return ret; 2052 2053 np = dev->of_node; 2054 if (!np) 2055 return ret; 2056 2057 ret = of_alias_get_id(np, "serial"); 2058 if (IS_ERR_VALUE(ret)) { 2059 seen_dev_without_alias = true; 2060 ret = index; 2061 } else { 2062 seen_dev_with_alias = true; 2063 if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) { 2064 dev_warn(dev, "requested serial port %d not available.\n", ret); 2065 ret = index; 2066 } 2067 } 2068 2069 if (seen_dev_with_alias && seen_dev_without_alias) 2070 dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n"); 2071 2072 return ret; 2073 } 2074 2075 static int pl011_probe(struct amba_device *dev, const struct amba_id *id) 2076 { 2077 struct uart_amba_port *uap; 2078 struct vendor_data *vendor = id->data; 2079 void __iomem *base; 2080 int i, ret; 2081 2082 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) 2083 if (amba_ports[i] == NULL) 2084 break; 2085 2086 if (i == ARRAY_SIZE(amba_ports)) { 2087 ret = -EBUSY; 2088 goto out; 2089 } 2090 2091 uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port), 2092 GFP_KERNEL); 2093 if (uap == NULL) { 2094 ret = -ENOMEM; 2095 goto out; 2096 } 2097 2098 i = pl011_probe_dt_alias(i, &dev->dev); 2099 2100 base = devm_ioremap(&dev->dev, dev->res.start, 2101 resource_size(&dev->res)); 2102 if (!base) { 2103 ret = -ENOMEM; 2104 goto out; 2105 } 2106 2107 uap->pinctrl = devm_pinctrl_get(&dev->dev); 2108 if (IS_ERR(uap->pinctrl)) { 2109 ret = PTR_ERR(uap->pinctrl); 2110 goto out; 2111 } 2112 uap->pins_default = pinctrl_lookup_state(uap->pinctrl, 2113 PINCTRL_STATE_DEFAULT); 2114 if (IS_ERR(uap->pins_default)) 2115 dev_err(&dev->dev, "could not get default pinstate\n"); 2116 2117 uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl, 2118 PINCTRL_STATE_SLEEP); 2119 if (IS_ERR(uap->pins_sleep)) 2120 dev_dbg(&dev->dev, "could not get sleep pinstate\n"); 2121 2122 uap->clk = devm_clk_get(&dev->dev, NULL); 2123 if (IS_ERR(uap->clk)) { 2124 ret = PTR_ERR(uap->clk); 2125 goto out; 2126 } 2127 2128 uap->vendor = vendor; 2129 uap->lcrh_rx = vendor->lcrh_rx; 2130 uap->lcrh_tx = vendor->lcrh_tx; 2131 uap->old_cr = 0; 2132 uap->fifosize = vendor->fifosize; 2133 uap->port.dev = &dev->dev; 2134 uap->port.mapbase = dev->res.start; 2135 uap->port.membase = base; 2136 uap->port.iotype = UPIO_MEM; 2137 uap->port.irq = dev->irq[0]; 2138 uap->port.fifosize = uap->fifosize; 2139 uap->port.ops = &amba_pl011_pops; 2140 uap->port.flags = UPF_BOOT_AUTOCONF; 2141 uap->port.line = i; 2142 pl011_dma_probe(uap); 2143 2144 /* Ensure interrupts from this UART are masked and cleared */ 2145 writew(0, uap->port.membase + UART011_IMSC); 2146 writew(0xffff, uap->port.membase + UART011_ICR); 2147 2148 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev)); 2149 2150 amba_ports[i] = uap; 2151 2152 amba_set_drvdata(dev, uap); 2153 ret = uart_add_one_port(&amba_reg, &uap->port); 2154 if (ret) { 2155 amba_set_drvdata(dev, NULL); 2156 amba_ports[i] = NULL; 2157 pl011_dma_remove(uap); 2158 } 2159 out: 2160 return ret; 2161 } 2162 2163 static int pl011_remove(struct amba_device *dev) 2164 { 2165 struct uart_amba_port *uap = amba_get_drvdata(dev); 2166 int i; 2167 2168 amba_set_drvdata(dev, NULL); 2169 2170 uart_remove_one_port(&amba_reg, &uap->port); 2171 2172 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) 2173 if (amba_ports[i] == uap) 2174 amba_ports[i] = NULL; 2175 2176 pl011_dma_remove(uap); 2177 return 0; 2178 } 2179 2180 #ifdef CONFIG_PM 2181 static int pl011_suspend(struct amba_device *dev, pm_message_t state) 2182 { 2183 struct uart_amba_port *uap = amba_get_drvdata(dev); 2184 2185 if (!uap) 2186 return -EINVAL; 2187 2188 return uart_suspend_port(&amba_reg, &uap->port); 2189 } 2190 2191 static int pl011_resume(struct amba_device *dev) 2192 { 2193 struct uart_amba_port *uap = amba_get_drvdata(dev); 2194 2195 if (!uap) 2196 return -EINVAL; 2197 2198 return uart_resume_port(&amba_reg, &uap->port); 2199 } 2200 #endif 2201 2202 static struct amba_id pl011_ids[] = { 2203 { 2204 .id = 0x00041011, 2205 .mask = 0x000fffff, 2206 .data = &vendor_arm, 2207 }, 2208 { 2209 .id = 0x00380802, 2210 .mask = 0x00ffffff, 2211 .data = &vendor_st, 2212 }, 2213 { 0, 0 }, 2214 }; 2215 2216 MODULE_DEVICE_TABLE(amba, pl011_ids); 2217 2218 static struct amba_driver pl011_driver = { 2219 .drv = { 2220 .name = "uart-pl011", 2221 }, 2222 .id_table = pl011_ids, 2223 .probe = pl011_probe, 2224 .remove = pl011_remove, 2225 #ifdef CONFIG_PM 2226 .suspend = pl011_suspend, 2227 .resume = pl011_resume, 2228 #endif 2229 }; 2230 2231 static int __init pl011_init(void) 2232 { 2233 int ret; 2234 printk(KERN_INFO "Serial: AMBA PL011 UART driver\n"); 2235 2236 ret = uart_register_driver(&amba_reg); 2237 if (ret == 0) { 2238 ret = amba_driver_register(&pl011_driver); 2239 if (ret) 2240 uart_unregister_driver(&amba_reg); 2241 } 2242 return ret; 2243 } 2244 2245 static void __exit pl011_exit(void) 2246 { 2247 amba_driver_unregister(&pl011_driver); 2248 uart_unregister_driver(&amba_reg); 2249 } 2250 2251 /* 2252 * While this can be a module, if builtin it's most likely the console 2253 * So let's leave module_exit but move module_init to an earlier place 2254 */ 2255 arch_initcall(pl011_init); 2256 module_exit(pl011_exit); 2257 2258 MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd"); 2259 MODULE_DESCRIPTION("ARM AMBA serial port driver"); 2260 MODULE_LICENSE("GPL"); 2261