1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Driver for AMBA serial ports 4 * 5 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 6 * 7 * Copyright 1999 ARM Limited 8 * Copyright (C) 2000 Deep Blue Solutions Ltd. 9 * Copyright (C) 2010 ST-Ericsson SA 10 * 11 * This is a generic driver for ARM AMBA-type serial ports. They 12 * have a lot of 16550-like features, but are not register compatible. 13 * Note that although they do have CTS, DCD and DSR inputs, they do 14 * not have an RI input, nor do they have DTR or RTS outputs. If 15 * required, these have to be supplied via some other means (eg, GPIO) 16 * and hooked into this driver. 17 */ 18 19 #include <linux/module.h> 20 #include <linux/ioport.h> 21 #include <linux/init.h> 22 #include <linux/console.h> 23 #include <linux/platform_device.h> 24 #include <linux/sysrq.h> 25 #include <linux/device.h> 26 #include <linux/tty.h> 27 #include <linux/tty_flip.h> 28 #include <linux/serial_core.h> 29 #include <linux/serial.h> 30 #include <linux/amba/bus.h> 31 #include <linux/amba/serial.h> 32 #include <linux/clk.h> 33 #include <linux/slab.h> 34 #include <linux/dmaengine.h> 35 #include <linux/dma-mapping.h> 36 #include <linux/scatterlist.h> 37 #include <linux/delay.h> 38 #include <linux/types.h> 39 #include <linux/of.h> 40 #include <linux/pinctrl/consumer.h> 41 #include <linux/sizes.h> 42 #include <linux/io.h> 43 #include <linux/acpi.h> 44 45 #define UART_NR 14 46 47 #define SERIAL_AMBA_MAJOR 204 48 #define SERIAL_AMBA_MINOR 64 49 #define SERIAL_AMBA_NR UART_NR 50 51 #define AMBA_ISR_PASS_LIMIT 256 52 53 #define UART_DR_ERROR (UART011_DR_OE | UART011_DR_BE | UART011_DR_PE | UART011_DR_FE) 54 #define UART_DUMMY_DR_RX BIT(16) 55 56 enum { 57 REG_DR, 58 REG_ST_DMAWM, 59 REG_ST_TIMEOUT, 60 REG_FR, 61 REG_LCRH_RX, 62 REG_LCRH_TX, 63 REG_IBRD, 64 REG_FBRD, 65 REG_CR, 66 REG_IFLS, 67 REG_IMSC, 68 REG_RIS, 69 REG_MIS, 70 REG_ICR, 71 REG_DMACR, 72 REG_ST_XFCR, 73 REG_ST_XON1, 74 REG_ST_XON2, 75 REG_ST_XOFF1, 76 REG_ST_XOFF2, 77 REG_ST_ITCR, 78 REG_ST_ITIP, 79 REG_ST_ABCR, 80 REG_ST_ABIMSC, 81 82 /* The size of the array - must be last */ 83 REG_ARRAY_SIZE, 84 }; 85 86 static u16 pl011_std_offsets[REG_ARRAY_SIZE] = { 87 [REG_DR] = UART01x_DR, 88 [REG_FR] = UART01x_FR, 89 [REG_LCRH_RX] = UART011_LCRH, 90 [REG_LCRH_TX] = UART011_LCRH, 91 [REG_IBRD] = UART011_IBRD, 92 [REG_FBRD] = UART011_FBRD, 93 [REG_CR] = UART011_CR, 94 [REG_IFLS] = UART011_IFLS, 95 [REG_IMSC] = UART011_IMSC, 96 [REG_RIS] = UART011_RIS, 97 [REG_MIS] = UART011_MIS, 98 [REG_ICR] = UART011_ICR, 99 [REG_DMACR] = UART011_DMACR, 100 }; 101 102 /* There is by now at least one vendor with differing details, so handle it */ 103 struct vendor_data { 104 const u16 *reg_offset; 105 unsigned int ifls; 106 unsigned int fr_busy; 107 unsigned int fr_dsr; 108 unsigned int fr_cts; 109 unsigned int fr_ri; 110 unsigned int inv_fr; 111 bool access_32b; 112 bool oversampling; 113 bool dma_threshold; 114 bool cts_event_workaround; 115 bool always_enabled; 116 bool fixed_options; 117 118 unsigned int (*get_fifosize)(struct amba_device *dev); 119 }; 120 121 static unsigned int get_fifosize_arm(struct amba_device *dev) 122 { 123 return amba_rev(dev) < 3 ? 16 : 32; 124 } 125 126 static struct vendor_data vendor_arm = { 127 .reg_offset = pl011_std_offsets, 128 .ifls = UART011_IFLS_RX4_8 | UART011_IFLS_TX4_8, 129 .fr_busy = UART01x_FR_BUSY, 130 .fr_dsr = UART01x_FR_DSR, 131 .fr_cts = UART01x_FR_CTS, 132 .fr_ri = UART011_FR_RI, 133 .oversampling = false, 134 .dma_threshold = false, 135 .cts_event_workaround = false, 136 .always_enabled = false, 137 .fixed_options = false, 138 .get_fifosize = get_fifosize_arm, 139 }; 140 141 static const struct vendor_data vendor_sbsa = { 142 .reg_offset = pl011_std_offsets, 143 .fr_busy = UART01x_FR_BUSY, 144 .fr_dsr = UART01x_FR_DSR, 145 .fr_cts = UART01x_FR_CTS, 146 .fr_ri = UART011_FR_RI, 147 .access_32b = true, 148 .oversampling = false, 149 .dma_threshold = false, 150 .cts_event_workaround = false, 151 .always_enabled = true, 152 .fixed_options = true, 153 }; 154 155 #ifdef CONFIG_ACPI_SPCR_TABLE 156 static const struct vendor_data vendor_qdt_qdf2400_e44 = { 157 .reg_offset = pl011_std_offsets, 158 .fr_busy = UART011_FR_TXFE, 159 .fr_dsr = UART01x_FR_DSR, 160 .fr_cts = UART01x_FR_CTS, 161 .fr_ri = UART011_FR_RI, 162 .inv_fr = UART011_FR_TXFE, 163 .access_32b = true, 164 .oversampling = false, 165 .dma_threshold = false, 166 .cts_event_workaround = false, 167 .always_enabled = true, 168 .fixed_options = true, 169 }; 170 #endif 171 172 static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { 173 [REG_DR] = UART01x_DR, 174 [REG_ST_DMAWM] = ST_UART011_DMAWM, 175 [REG_ST_TIMEOUT] = ST_UART011_TIMEOUT, 176 [REG_FR] = UART01x_FR, 177 [REG_LCRH_RX] = ST_UART011_LCRH_RX, 178 [REG_LCRH_TX] = ST_UART011_LCRH_TX, 179 [REG_IBRD] = UART011_IBRD, 180 [REG_FBRD] = UART011_FBRD, 181 [REG_CR] = UART011_CR, 182 [REG_IFLS] = UART011_IFLS, 183 [REG_IMSC] = UART011_IMSC, 184 [REG_RIS] = UART011_RIS, 185 [REG_MIS] = UART011_MIS, 186 [REG_ICR] = UART011_ICR, 187 [REG_DMACR] = UART011_DMACR, 188 [REG_ST_XFCR] = ST_UART011_XFCR, 189 [REG_ST_XON1] = ST_UART011_XON1, 190 [REG_ST_XON2] = ST_UART011_XON2, 191 [REG_ST_XOFF1] = ST_UART011_XOFF1, 192 [REG_ST_XOFF2] = ST_UART011_XOFF2, 193 [REG_ST_ITCR] = ST_UART011_ITCR, 194 [REG_ST_ITIP] = ST_UART011_ITIP, 195 [REG_ST_ABCR] = ST_UART011_ABCR, 196 [REG_ST_ABIMSC] = ST_UART011_ABIMSC, 197 }; 198 199 static unsigned int get_fifosize_st(struct amba_device *dev) 200 { 201 return 64; 202 } 203 204 static struct vendor_data vendor_st = { 205 .reg_offset = pl011_st_offsets, 206 .ifls = UART011_IFLS_RX_HALF | UART011_IFLS_TX_HALF, 207 .fr_busy = UART01x_FR_BUSY, 208 .fr_dsr = UART01x_FR_DSR, 209 .fr_cts = UART01x_FR_CTS, 210 .fr_ri = UART011_FR_RI, 211 .oversampling = true, 212 .dma_threshold = true, 213 .cts_event_workaround = true, 214 .always_enabled = false, 215 .fixed_options = false, 216 .get_fifosize = get_fifosize_st, 217 }; 218 219 /* Deals with DMA transactions */ 220 221 struct pl011_dmabuf { 222 dma_addr_t dma; 223 size_t len; 224 char *buf; 225 }; 226 227 struct pl011_dmarx_data { 228 struct dma_chan *chan; 229 struct completion complete; 230 bool use_buf_b; 231 struct pl011_dmabuf dbuf_a; 232 struct pl011_dmabuf dbuf_b; 233 dma_cookie_t cookie; 234 bool running; 235 struct timer_list timer; 236 unsigned int last_residue; 237 unsigned long last_jiffies; 238 bool auto_poll_rate; 239 unsigned int poll_rate; 240 unsigned int poll_timeout; 241 }; 242 243 struct pl011_dmatx_data { 244 struct dma_chan *chan; 245 dma_addr_t dma; 246 size_t len; 247 char *buf; 248 bool queued; 249 }; 250 251 enum pl011_rs485_tx_state { 252 OFF, 253 WAIT_AFTER_RTS, 254 SEND, 255 WAIT_AFTER_SEND, 256 }; 257 258 /* 259 * We wrap our port structure around the generic uart_port. 260 */ 261 struct uart_amba_port { 262 struct uart_port port; 263 const u16 *reg_offset; 264 struct clk *clk; 265 const struct vendor_data *vendor; 266 unsigned int im; /* interrupt mask */ 267 unsigned int old_status; 268 unsigned int fifosize; /* vendor-specific */ 269 unsigned int fixed_baud; /* vendor-set fixed baud rate */ 270 char type[12]; 271 ktime_t rs485_tx_drain_interval; /* nano */ 272 enum pl011_rs485_tx_state rs485_tx_state; 273 struct hrtimer trigger_start_tx; 274 struct hrtimer trigger_stop_tx; 275 bool console_line_ended; 276 #ifdef CONFIG_DMA_ENGINE 277 /* DMA stuff */ 278 unsigned int dmacr; /* dma control reg */ 279 bool using_tx_dma; 280 bool using_rx_dma; 281 struct pl011_dmarx_data dmarx; 282 struct pl011_dmatx_data dmatx; 283 bool dma_probed; 284 #endif 285 }; 286 287 static unsigned int pl011_tx_empty(struct uart_port *port); 288 289 static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap, 290 unsigned int reg) 291 { 292 return uap->reg_offset[reg]; 293 } 294 295 static unsigned int pl011_read(const struct uart_amba_port *uap, 296 unsigned int reg) 297 { 298 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); 299 300 return (uap->port.iotype == UPIO_MEM32) ? 301 readl_relaxed(addr) : readw_relaxed(addr); 302 } 303 304 static void pl011_write(unsigned int val, const struct uart_amba_port *uap, 305 unsigned int reg) 306 { 307 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); 308 309 if (uap->port.iotype == UPIO_MEM32) 310 writel_relaxed(val, addr); 311 else 312 writew_relaxed(val, addr); 313 } 314 315 /* 316 * Reads up to 256 characters from the FIFO or until it's empty and 317 * inserts them into the TTY layer. Returns the number of characters 318 * read from the FIFO. 319 */ 320 static int pl011_fifo_to_tty(struct uart_amba_port *uap) 321 { 322 unsigned int ch, fifotaken; 323 int sysrq; 324 u16 status; 325 u8 flag; 326 327 for (fifotaken = 0; fifotaken != 256; fifotaken++) { 328 status = pl011_read(uap, REG_FR); 329 if (status & UART01x_FR_RXFE) 330 break; 331 332 /* Take chars from the FIFO and update status */ 333 ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX; 334 flag = TTY_NORMAL; 335 uap->port.icount.rx++; 336 337 if (unlikely(ch & UART_DR_ERROR)) { 338 if (ch & UART011_DR_BE) { 339 ch &= ~(UART011_DR_FE | UART011_DR_PE); 340 uap->port.icount.brk++; 341 if (uart_handle_break(&uap->port)) 342 continue; 343 } else if (ch & UART011_DR_PE) { 344 uap->port.icount.parity++; 345 } else if (ch & UART011_DR_FE) { 346 uap->port.icount.frame++; 347 } 348 if (ch & UART011_DR_OE) 349 uap->port.icount.overrun++; 350 351 ch &= uap->port.read_status_mask; 352 353 if (ch & UART011_DR_BE) 354 flag = TTY_BREAK; 355 else if (ch & UART011_DR_PE) 356 flag = TTY_PARITY; 357 else if (ch & UART011_DR_FE) 358 flag = TTY_FRAME; 359 } 360 361 sysrq = uart_prepare_sysrq_char(&uap->port, ch & 255); 362 if (!sysrq) 363 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); 364 } 365 366 return fifotaken; 367 } 368 369 /* 370 * All the DMA operation mode stuff goes inside this ifdef. 371 * This assumes that you have a generic DMA device interface, 372 * no custom DMA interfaces are supported. 373 */ 374 #ifdef CONFIG_DMA_ENGINE 375 376 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE 377 378 static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db, 379 enum dma_data_direction dir) 380 { 381 db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE, 382 &db->dma, GFP_KERNEL); 383 if (!db->buf) 384 return -ENOMEM; 385 db->len = PL011_DMA_BUFFER_SIZE; 386 387 return 0; 388 } 389 390 static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db, 391 enum dma_data_direction dir) 392 { 393 if (db->buf) { 394 dma_free_coherent(chan->device->dev, 395 PL011_DMA_BUFFER_SIZE, db->buf, db->dma); 396 } 397 } 398 399 static void pl011_dma_probe(struct uart_amba_port *uap) 400 { 401 /* DMA is the sole user of the platform data right now */ 402 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev); 403 struct device *dev = uap->port.dev; 404 struct dma_slave_config tx_conf = { 405 .dst_addr = uap->port.mapbase + 406 pl011_reg_to_offset(uap, REG_DR), 407 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 408 .direction = DMA_MEM_TO_DEV, 409 .dst_maxburst = uap->fifosize >> 1, 410 .device_fc = false, 411 }; 412 struct dma_chan *chan; 413 dma_cap_mask_t mask; 414 415 uap->dma_probed = true; 416 chan = dma_request_chan(dev, "tx"); 417 if (IS_ERR(chan)) { 418 if (PTR_ERR(chan) == -EPROBE_DEFER) { 419 uap->dma_probed = false; 420 return; 421 } 422 423 /* We need platform data */ 424 if (!plat || !plat->dma_filter) { 425 dev_dbg(uap->port.dev, "no DMA platform data\n"); 426 return; 427 } 428 429 /* Try to acquire a generic DMA engine slave TX channel */ 430 dma_cap_zero(mask); 431 dma_cap_set(DMA_SLAVE, mask); 432 433 chan = dma_request_channel(mask, plat->dma_filter, 434 plat->dma_tx_param); 435 if (!chan) { 436 dev_err(uap->port.dev, "no TX DMA channel!\n"); 437 return; 438 } 439 } 440 441 dmaengine_slave_config(chan, &tx_conf); 442 uap->dmatx.chan = chan; 443 444 dev_info(uap->port.dev, "DMA channel TX %s\n", 445 dma_chan_name(uap->dmatx.chan)); 446 447 /* Optionally make use of an RX channel as well */ 448 chan = dma_request_chan(dev, "rx"); 449 450 if (IS_ERR(chan) && plat && plat->dma_rx_param) { 451 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); 452 453 if (!chan) { 454 dev_err(uap->port.dev, "no RX DMA channel!\n"); 455 return; 456 } 457 } 458 459 if (!IS_ERR(chan)) { 460 struct dma_slave_config rx_conf = { 461 .src_addr = uap->port.mapbase + 462 pl011_reg_to_offset(uap, REG_DR), 463 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 464 .direction = DMA_DEV_TO_MEM, 465 .src_maxburst = uap->fifosize >> 2, 466 .device_fc = false, 467 }; 468 struct dma_slave_caps caps; 469 470 /* 471 * Some DMA controllers provide information on their capabilities. 472 * If the controller does, check for suitable residue processing 473 * otherwise assime all is well. 474 */ 475 if (dma_get_slave_caps(chan, &caps) == 0) { 476 if (caps.residue_granularity == 477 DMA_RESIDUE_GRANULARITY_DESCRIPTOR) { 478 dma_release_channel(chan); 479 dev_info(uap->port.dev, 480 "RX DMA disabled - no residue processing\n"); 481 return; 482 } 483 } 484 dmaengine_slave_config(chan, &rx_conf); 485 uap->dmarx.chan = chan; 486 487 uap->dmarx.auto_poll_rate = false; 488 if (plat && plat->dma_rx_poll_enable) { 489 /* Set poll rate if specified. */ 490 if (plat->dma_rx_poll_rate) { 491 uap->dmarx.auto_poll_rate = false; 492 uap->dmarx.poll_rate = plat->dma_rx_poll_rate; 493 } else { 494 /* 495 * 100 ms defaults to poll rate if not 496 * specified. This will be adjusted with 497 * the baud rate at set_termios. 498 */ 499 uap->dmarx.auto_poll_rate = true; 500 uap->dmarx.poll_rate = 100; 501 } 502 /* 3 secs defaults poll_timeout if not specified. */ 503 if (plat->dma_rx_poll_timeout) 504 uap->dmarx.poll_timeout = 505 plat->dma_rx_poll_timeout; 506 else 507 uap->dmarx.poll_timeout = 3000; 508 } else if (!plat && dev->of_node) { 509 uap->dmarx.auto_poll_rate = 510 of_property_read_bool(dev->of_node, "auto-poll"); 511 if (uap->dmarx.auto_poll_rate) { 512 u32 x; 513 514 if (of_property_read_u32(dev->of_node, "poll-rate-ms", &x) == 0) 515 uap->dmarx.poll_rate = x; 516 else 517 uap->dmarx.poll_rate = 100; 518 if (of_property_read_u32(dev->of_node, "poll-timeout-ms", &x) == 0) 519 uap->dmarx.poll_timeout = x; 520 else 521 uap->dmarx.poll_timeout = 3000; 522 } 523 } 524 dev_info(uap->port.dev, "DMA channel RX %s\n", 525 dma_chan_name(uap->dmarx.chan)); 526 } 527 } 528 529 static void pl011_dma_remove(struct uart_amba_port *uap) 530 { 531 if (uap->dmatx.chan) 532 dma_release_channel(uap->dmatx.chan); 533 if (uap->dmarx.chan) 534 dma_release_channel(uap->dmarx.chan); 535 } 536 537 /* Forward declare these for the refill routine */ 538 static int pl011_dma_tx_refill(struct uart_amba_port *uap); 539 static void pl011_start_tx_pio(struct uart_amba_port *uap); 540 541 /* 542 * The current DMA TX buffer has been sent. 543 * Try to queue up another DMA buffer. 544 */ 545 static void pl011_dma_tx_callback(void *data) 546 { 547 struct uart_amba_port *uap = data; 548 struct tty_port *tport = &uap->port.state->port; 549 struct pl011_dmatx_data *dmatx = &uap->dmatx; 550 unsigned long flags; 551 u16 dmacr; 552 553 uart_port_lock_irqsave(&uap->port, &flags); 554 if (uap->dmatx.queued) 555 dma_unmap_single(dmatx->chan->device->dev, dmatx->dma, 556 dmatx->len, DMA_TO_DEVICE); 557 558 dmacr = uap->dmacr; 559 uap->dmacr = dmacr & ~UART011_TXDMAE; 560 pl011_write(uap->dmacr, uap, REG_DMACR); 561 562 /* 563 * If TX DMA was disabled, it means that we've stopped the DMA for 564 * some reason (eg, XOFF received, or we want to send an X-char.) 565 * 566 * Note: we need to be careful here of a potential race between DMA 567 * and the rest of the driver - if the driver disables TX DMA while 568 * a TX buffer completing, we must update the tx queued status to 569 * get further refills (hence we check dmacr). 570 */ 571 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) || 572 kfifo_is_empty(&tport->xmit_fifo)) { 573 uap->dmatx.queued = false; 574 uart_port_unlock_irqrestore(&uap->port, flags); 575 return; 576 } 577 578 if (pl011_dma_tx_refill(uap) <= 0) 579 /* 580 * We didn't queue a DMA buffer for some reason, but we 581 * have data pending to be sent. Re-enable the TX IRQ. 582 */ 583 pl011_start_tx_pio(uap); 584 585 uart_port_unlock_irqrestore(&uap->port, flags); 586 } 587 588 /* 589 * Try to refill the TX DMA buffer. 590 * Locking: called with port lock held and IRQs disabled. 591 * Returns: 592 * 1 if we queued up a TX DMA buffer. 593 * 0 if we didn't want to handle this by DMA 594 * <0 on error 595 */ 596 static int pl011_dma_tx_refill(struct uart_amba_port *uap) 597 { 598 struct pl011_dmatx_data *dmatx = &uap->dmatx; 599 struct dma_chan *chan = dmatx->chan; 600 struct dma_device *dma_dev = chan->device; 601 struct dma_async_tx_descriptor *desc; 602 struct tty_port *tport = &uap->port.state->port; 603 unsigned int count; 604 605 /* 606 * Try to avoid the overhead involved in using DMA if the 607 * transaction fits in the first half of the FIFO, by using 608 * the standard interrupt handling. This ensures that we 609 * issue a uart_write_wakeup() at the appropriate time. 610 */ 611 count = kfifo_len(&tport->xmit_fifo); 612 if (count < (uap->fifosize >> 1)) { 613 uap->dmatx.queued = false; 614 return 0; 615 } 616 617 /* 618 * Bodge: don't send the last character by DMA, as this 619 * will prevent XON from notifying us to restart DMA. 620 */ 621 count -= 1; 622 623 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */ 624 if (count > PL011_DMA_BUFFER_SIZE) 625 count = PL011_DMA_BUFFER_SIZE; 626 627 count = kfifo_out_peek(&tport->xmit_fifo, dmatx->buf, count); 628 dmatx->len = count; 629 dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count, 630 DMA_TO_DEVICE); 631 if (dmatx->dma == DMA_MAPPING_ERROR) { 632 uap->dmatx.queued = false; 633 dev_dbg(uap->port.dev, "unable to map TX DMA\n"); 634 return -EBUSY; 635 } 636 637 desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV, 638 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 639 if (!desc) { 640 dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE); 641 uap->dmatx.queued = false; 642 /* 643 * If DMA cannot be used right now, we complete this 644 * transaction via IRQ and let the TTY layer retry. 645 */ 646 dev_dbg(uap->port.dev, "TX DMA busy\n"); 647 return -EBUSY; 648 } 649 650 /* Some data to go along to the callback */ 651 desc->callback = pl011_dma_tx_callback; 652 desc->callback_param = uap; 653 654 /* All errors should happen at prepare time */ 655 dmaengine_submit(desc); 656 657 /* Fire the DMA transaction */ 658 dma_dev->device_issue_pending(chan); 659 660 uap->dmacr |= UART011_TXDMAE; 661 pl011_write(uap->dmacr, uap, REG_DMACR); 662 uap->dmatx.queued = true; 663 664 /* 665 * Now we know that DMA will fire, so advance the ring buffer 666 * with the stuff we just dispatched. 667 */ 668 uart_xmit_advance(&uap->port, count); 669 670 if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) 671 uart_write_wakeup(&uap->port); 672 673 return 1; 674 } 675 676 /* 677 * We received a transmit interrupt without a pending X-char but with 678 * pending characters. 679 * Locking: called with port lock held and IRQs disabled. 680 * Returns: 681 * false if we want to use PIO to transmit 682 * true if we queued a DMA buffer 683 */ 684 static bool pl011_dma_tx_irq(struct uart_amba_port *uap) 685 { 686 if (!uap->using_tx_dma) 687 return false; 688 689 /* 690 * If we already have a TX buffer queued, but received a 691 * TX interrupt, it will be because we've just sent an X-char. 692 * Ensure the TX DMA is enabled and the TX IRQ is disabled. 693 */ 694 if (uap->dmatx.queued) { 695 uap->dmacr |= UART011_TXDMAE; 696 pl011_write(uap->dmacr, uap, REG_DMACR); 697 uap->im &= ~UART011_TXIM; 698 pl011_write(uap->im, uap, REG_IMSC); 699 return true; 700 } 701 702 /* 703 * We don't have a TX buffer queued, so try to queue one. 704 * If we successfully queued a buffer, mask the TX IRQ. 705 */ 706 if (pl011_dma_tx_refill(uap) > 0) { 707 uap->im &= ~UART011_TXIM; 708 pl011_write(uap->im, uap, REG_IMSC); 709 return true; 710 } 711 return false; 712 } 713 714 /* 715 * Stop the DMA transmit (eg, due to received XOFF). 716 * Locking: called with port lock held and IRQs disabled. 717 */ 718 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap) 719 { 720 if (uap->dmatx.queued) { 721 uap->dmacr &= ~UART011_TXDMAE; 722 pl011_write(uap->dmacr, uap, REG_DMACR); 723 } 724 } 725 726 /* 727 * Try to start a DMA transmit, or in the case of an XON/OFF 728 * character queued for send, try to get that character out ASAP. 729 * Locking: called with port lock held and IRQs disabled. 730 * Returns: 731 * false if we want the TX IRQ to be enabled 732 * true if we have a buffer queued 733 */ 734 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) 735 { 736 u16 dmacr; 737 738 if (!uap->using_tx_dma) 739 return false; 740 741 if (!uap->port.x_char) { 742 /* no X-char, try to push chars out in DMA mode */ 743 bool ret = true; 744 745 if (!uap->dmatx.queued) { 746 if (pl011_dma_tx_refill(uap) > 0) { 747 uap->im &= ~UART011_TXIM; 748 pl011_write(uap->im, uap, REG_IMSC); 749 } else { 750 ret = false; 751 } 752 } else if (!(uap->dmacr & UART011_TXDMAE)) { 753 uap->dmacr |= UART011_TXDMAE; 754 pl011_write(uap->dmacr, uap, REG_DMACR); 755 } 756 return ret; 757 } 758 759 /* 760 * We have an X-char to send. Disable DMA to prevent it loading 761 * the TX fifo, and then see if we can stuff it into the FIFO. 762 */ 763 dmacr = uap->dmacr; 764 uap->dmacr &= ~UART011_TXDMAE; 765 pl011_write(uap->dmacr, uap, REG_DMACR); 766 767 if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) { 768 /* 769 * No space in the FIFO, so enable the transmit interrupt 770 * so we know when there is space. Note that once we've 771 * loaded the character, we should just re-enable DMA. 772 */ 773 return false; 774 } 775 776 pl011_write(uap->port.x_char, uap, REG_DR); 777 uap->port.icount.tx++; 778 uap->port.x_char = 0; 779 780 /* Success - restore the DMA state */ 781 uap->dmacr = dmacr; 782 pl011_write(dmacr, uap, REG_DMACR); 783 784 return true; 785 } 786 787 /* 788 * Flush the transmit buffer. 789 * Locking: called with port lock held and IRQs disabled. 790 */ 791 static void pl011_dma_flush_buffer(struct uart_port *port) 792 __releases(&uap->port.lock) 793 __acquires(&uap->port.lock) 794 { 795 struct uart_amba_port *uap = 796 container_of(port, struct uart_amba_port, port); 797 798 if (!uap->using_tx_dma) 799 return; 800 801 dmaengine_terminate_async(uap->dmatx.chan); 802 803 if (uap->dmatx.queued) { 804 dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma, 805 uap->dmatx.len, DMA_TO_DEVICE); 806 uap->dmatx.queued = false; 807 uap->dmacr &= ~UART011_TXDMAE; 808 pl011_write(uap->dmacr, uap, REG_DMACR); 809 } 810 } 811 812 static void pl011_dma_rx_callback(void *data); 813 814 static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) 815 { 816 struct dma_chan *rxchan = uap->dmarx.chan; 817 struct pl011_dmarx_data *dmarx = &uap->dmarx; 818 struct dma_async_tx_descriptor *desc; 819 struct pl011_dmabuf *dbuf; 820 821 if (!rxchan) 822 return -EIO; 823 824 /* Start the RX DMA job */ 825 dbuf = uap->dmarx.use_buf_b ? 826 &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; 827 desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len, 828 DMA_DEV_TO_MEM, 829 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 830 /* 831 * If the DMA engine is busy and cannot prepare a 832 * channel, no big deal, the driver will fall back 833 * to interrupt mode as a result of this error code. 834 */ 835 if (!desc) { 836 uap->dmarx.running = false; 837 dmaengine_terminate_all(rxchan); 838 return -EBUSY; 839 } 840 841 /* Some data to go along to the callback */ 842 desc->callback = pl011_dma_rx_callback; 843 desc->callback_param = uap; 844 dmarx->cookie = dmaengine_submit(desc); 845 dma_async_issue_pending(rxchan); 846 847 uap->dmacr |= UART011_RXDMAE; 848 pl011_write(uap->dmacr, uap, REG_DMACR); 849 uap->dmarx.running = true; 850 851 uap->im &= ~UART011_RXIM; 852 pl011_write(uap->im, uap, REG_IMSC); 853 854 return 0; 855 } 856 857 /* 858 * This is called when either the DMA job is complete, or 859 * the FIFO timeout interrupt occurred. This must be called 860 * with the port spinlock uap->port.lock held. 861 */ 862 static void pl011_dma_rx_chars(struct uart_amba_port *uap, 863 u32 pending, bool use_buf_b, 864 bool readfifo) 865 { 866 struct tty_port *port = &uap->port.state->port; 867 struct pl011_dmabuf *dbuf = use_buf_b ? 868 &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; 869 int dma_count = 0; 870 u32 fifotaken = 0; /* only used for vdbg() */ 871 872 struct pl011_dmarx_data *dmarx = &uap->dmarx; 873 int dmataken = 0; 874 875 if (uap->dmarx.poll_rate) { 876 /* The data can be taken by polling */ 877 dmataken = dbuf->len - dmarx->last_residue; 878 /* Recalculate the pending size */ 879 if (pending >= dmataken) 880 pending -= dmataken; 881 } 882 883 /* Pick the remain data from the DMA */ 884 if (pending) { 885 /* 886 * First take all chars in the DMA pipe, then look in the FIFO. 887 * Note that tty_insert_flip_buf() tries to take as many chars 888 * as it can. 889 */ 890 dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, pending); 891 892 uap->port.icount.rx += dma_count; 893 if (dma_count < pending) 894 dev_warn(uap->port.dev, 895 "couldn't insert all characters (TTY is full?)\n"); 896 } 897 898 /* Reset the last_residue for Rx DMA poll */ 899 if (uap->dmarx.poll_rate) 900 dmarx->last_residue = dbuf->len; 901 902 /* 903 * Only continue with trying to read the FIFO if all DMA chars have 904 * been taken first. 905 */ 906 if (dma_count == pending && readfifo) { 907 /* Clear any error flags */ 908 pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS | 909 UART011_FEIS, uap, REG_ICR); 910 911 /* 912 * If we read all the DMA'd characters, and we had an 913 * incomplete buffer, that could be due to an rx error, or 914 * maybe we just timed out. Read any pending chars and check 915 * the error status. 916 * 917 * Error conditions will only occur in the FIFO, these will 918 * trigger an immediate interrupt and stop the DMA job, so we 919 * will always find the error in the FIFO, never in the DMA 920 * buffer. 921 */ 922 fifotaken = pl011_fifo_to_tty(uap); 923 } 924 925 dev_vdbg(uap->port.dev, 926 "Took %d chars from DMA buffer and %d chars from the FIFO\n", 927 dma_count, fifotaken); 928 tty_flip_buffer_push(port); 929 } 930 931 static void pl011_dma_rx_irq(struct uart_amba_port *uap) 932 { 933 struct pl011_dmarx_data *dmarx = &uap->dmarx; 934 struct dma_chan *rxchan = dmarx->chan; 935 struct pl011_dmabuf *dbuf = dmarx->use_buf_b ? 936 &dmarx->dbuf_b : &dmarx->dbuf_a; 937 size_t pending; 938 struct dma_tx_state state; 939 enum dma_status dmastat; 940 941 /* 942 * Pause the transfer so we can trust the current counter, 943 * do this before we pause the PL011 block, else we may 944 * overflow the FIFO. 945 */ 946 if (dmaengine_pause(rxchan)) 947 dev_err(uap->port.dev, "unable to pause DMA transfer\n"); 948 dmastat = rxchan->device->device_tx_status(rxchan, 949 dmarx->cookie, &state); 950 if (dmastat != DMA_PAUSED) 951 dev_err(uap->port.dev, "unable to pause DMA transfer\n"); 952 953 /* Disable RX DMA - incoming data will wait in the FIFO */ 954 uap->dmacr &= ~UART011_RXDMAE; 955 pl011_write(uap->dmacr, uap, REG_DMACR); 956 uap->dmarx.running = false; 957 958 pending = dbuf->len - state.residue; 959 BUG_ON(pending > PL011_DMA_BUFFER_SIZE); 960 /* Then we terminate the transfer - we now know our residue */ 961 dmaengine_terminate_all(rxchan); 962 963 /* 964 * This will take the chars we have so far and insert 965 * into the framework. 966 */ 967 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true); 968 969 /* Switch buffer & re-trigger DMA job */ 970 dmarx->use_buf_b = !dmarx->use_buf_b; 971 if (pl011_dma_rx_trigger_dma(uap)) { 972 dev_dbg(uap->port.dev, 973 "could not retrigger RX DMA job fall back to interrupt mode\n"); 974 uap->im |= UART011_RXIM; 975 pl011_write(uap->im, uap, REG_IMSC); 976 } 977 } 978 979 static void pl011_dma_rx_callback(void *data) 980 { 981 struct uart_amba_port *uap = data; 982 struct pl011_dmarx_data *dmarx = &uap->dmarx; 983 struct dma_chan *rxchan = dmarx->chan; 984 bool lastbuf = dmarx->use_buf_b; 985 struct pl011_dmabuf *dbuf = dmarx->use_buf_b ? 986 &dmarx->dbuf_b : &dmarx->dbuf_a; 987 size_t pending; 988 struct dma_tx_state state; 989 int ret; 990 991 /* 992 * This completion interrupt occurs typically when the 993 * RX buffer is totally stuffed but no timeout has yet 994 * occurred. When that happens, we just want the RX 995 * routine to flush out the secondary DMA buffer while 996 * we immediately trigger the next DMA job. 997 */ 998 uart_port_lock_irq(&uap->port); 999 /* 1000 * Rx data can be taken by the UART interrupts during 1001 * the DMA irq handler. So we check the residue here. 1002 */ 1003 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); 1004 pending = dbuf->len - state.residue; 1005 BUG_ON(pending > PL011_DMA_BUFFER_SIZE); 1006 /* Then we terminate the transfer - we now know our residue */ 1007 dmaengine_terminate_all(rxchan); 1008 1009 uap->dmarx.running = false; 1010 dmarx->use_buf_b = !lastbuf; 1011 ret = pl011_dma_rx_trigger_dma(uap); 1012 1013 pl011_dma_rx_chars(uap, pending, lastbuf, false); 1014 uart_unlock_and_check_sysrq(&uap->port); 1015 /* 1016 * Do this check after we picked the DMA chars so we don't 1017 * get some IRQ immediately from RX. 1018 */ 1019 if (ret) { 1020 dev_dbg(uap->port.dev, 1021 "could not retrigger RX DMA job fall back to interrupt mode\n"); 1022 uap->im |= UART011_RXIM; 1023 pl011_write(uap->im, uap, REG_IMSC); 1024 } 1025 } 1026 1027 /* 1028 * Stop accepting received characters, when we're shutting down or 1029 * suspending this port. 1030 * Locking: called with port lock held and IRQs disabled. 1031 */ 1032 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) 1033 { 1034 if (!uap->using_rx_dma) 1035 return; 1036 1037 /* FIXME. Just disable the DMA enable */ 1038 uap->dmacr &= ~UART011_RXDMAE; 1039 pl011_write(uap->dmacr, uap, REG_DMACR); 1040 } 1041 1042 /* 1043 * Timer handler for Rx DMA polling. 1044 * Every polling, It checks the residue in the dma buffer and transfer 1045 * data to the tty. Also, last_residue is updated for the next polling. 1046 */ 1047 static void pl011_dma_rx_poll(struct timer_list *t) 1048 { 1049 struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer); 1050 struct tty_port *port = &uap->port.state->port; 1051 struct pl011_dmarx_data *dmarx = &uap->dmarx; 1052 struct dma_chan *rxchan = uap->dmarx.chan; 1053 unsigned long flags; 1054 unsigned int dmataken = 0; 1055 unsigned int size = 0; 1056 struct pl011_dmabuf *dbuf; 1057 int dma_count; 1058 struct dma_tx_state state; 1059 1060 dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; 1061 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); 1062 if (likely(state.residue < dmarx->last_residue)) { 1063 dmataken = dbuf->len - dmarx->last_residue; 1064 size = dmarx->last_residue - state.residue; 1065 dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, 1066 size); 1067 if (dma_count == size) 1068 dmarx->last_residue = state.residue; 1069 dmarx->last_jiffies = jiffies; 1070 } 1071 tty_flip_buffer_push(port); 1072 1073 /* 1074 * If no data is received in poll_timeout, the driver will fall back 1075 * to interrupt mode. We will retrigger DMA at the first interrupt. 1076 */ 1077 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies) 1078 > uap->dmarx.poll_timeout) { 1079 uart_port_lock_irqsave(&uap->port, &flags); 1080 pl011_dma_rx_stop(uap); 1081 uap->im |= UART011_RXIM; 1082 pl011_write(uap->im, uap, REG_IMSC); 1083 uart_port_unlock_irqrestore(&uap->port, flags); 1084 1085 uap->dmarx.running = false; 1086 dmaengine_terminate_all(rxchan); 1087 timer_delete(&uap->dmarx.timer); 1088 } else { 1089 mod_timer(&uap->dmarx.timer, 1090 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); 1091 } 1092 } 1093 1094 static void pl011_dma_startup(struct uart_amba_port *uap) 1095 { 1096 int ret; 1097 1098 if (!uap->dma_probed) 1099 pl011_dma_probe(uap); 1100 1101 if (!uap->dmatx.chan) 1102 return; 1103 1104 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA); 1105 if (!uap->dmatx.buf) { 1106 uap->port.fifosize = uap->fifosize; 1107 return; 1108 } 1109 1110 uap->dmatx.len = PL011_DMA_BUFFER_SIZE; 1111 1112 /* The DMA buffer is now the FIFO the TTY subsystem can use */ 1113 uap->port.fifosize = PL011_DMA_BUFFER_SIZE; 1114 uap->using_tx_dma = true; 1115 1116 if (!uap->dmarx.chan) 1117 goto skip_rx; 1118 1119 /* Allocate and map DMA RX buffers */ 1120 ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a, 1121 DMA_FROM_DEVICE); 1122 if (ret) { 1123 dev_err(uap->port.dev, "failed to init DMA %s: %d\n", 1124 "RX buffer A", ret); 1125 goto skip_rx; 1126 } 1127 1128 ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b, 1129 DMA_FROM_DEVICE); 1130 if (ret) { 1131 dev_err(uap->port.dev, "failed to init DMA %s: %d\n", 1132 "RX buffer B", ret); 1133 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, 1134 DMA_FROM_DEVICE); 1135 goto skip_rx; 1136 } 1137 1138 uap->using_rx_dma = true; 1139 1140 skip_rx: 1141 /* Turn on DMA error (RX/TX will be enabled on demand) */ 1142 uap->dmacr |= UART011_DMAONERR; 1143 pl011_write(uap->dmacr, uap, REG_DMACR); 1144 1145 /* 1146 * ST Micro variants has some specific dma burst threshold 1147 * compensation. Set this to 16 bytes, so burst will only 1148 * be issued above/below 16 bytes. 1149 */ 1150 if (uap->vendor->dma_threshold) 1151 pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16, 1152 uap, REG_ST_DMAWM); 1153 1154 if (uap->using_rx_dma) { 1155 if (pl011_dma_rx_trigger_dma(uap)) 1156 dev_dbg(uap->port.dev, 1157 "could not trigger initial RX DMA job, fall back to interrupt mode\n"); 1158 if (uap->dmarx.poll_rate) { 1159 timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0); 1160 mod_timer(&uap->dmarx.timer, 1161 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); 1162 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; 1163 uap->dmarx.last_jiffies = jiffies; 1164 } 1165 } 1166 } 1167 1168 static void pl011_dma_shutdown(struct uart_amba_port *uap) 1169 { 1170 if (!(uap->using_tx_dma || uap->using_rx_dma)) 1171 return; 1172 1173 /* Disable RX and TX DMA */ 1174 while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy) 1175 cpu_relax(); 1176 1177 uart_port_lock_irq(&uap->port); 1178 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE); 1179 pl011_write(uap->dmacr, uap, REG_DMACR); 1180 uart_port_unlock_irq(&uap->port); 1181 1182 if (uap->using_tx_dma) { 1183 /* In theory, this should already be done by pl011_dma_flush_buffer */ 1184 dmaengine_terminate_all(uap->dmatx.chan); 1185 if (uap->dmatx.queued) { 1186 dma_unmap_single(uap->dmatx.chan->device->dev, 1187 uap->dmatx.dma, uap->dmatx.len, 1188 DMA_TO_DEVICE); 1189 uap->dmatx.queued = false; 1190 } 1191 1192 kfree(uap->dmatx.buf); 1193 uap->using_tx_dma = false; 1194 } 1195 1196 if (uap->using_rx_dma) { 1197 dmaengine_terminate_all(uap->dmarx.chan); 1198 /* Clean up the RX DMA */ 1199 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE); 1200 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE); 1201 if (uap->dmarx.poll_rate) 1202 timer_delete_sync(&uap->dmarx.timer); 1203 uap->using_rx_dma = false; 1204 } 1205 } 1206 1207 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) 1208 { 1209 return uap->using_rx_dma; 1210 } 1211 1212 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) 1213 { 1214 return uap->using_rx_dma && uap->dmarx.running; 1215 } 1216 1217 #else 1218 /* Blank functions if the DMA engine is not available */ 1219 static inline void pl011_dma_remove(struct uart_amba_port *uap) 1220 { 1221 } 1222 1223 static inline void pl011_dma_startup(struct uart_amba_port *uap) 1224 { 1225 } 1226 1227 static inline void pl011_dma_shutdown(struct uart_amba_port *uap) 1228 { 1229 } 1230 1231 static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap) 1232 { 1233 return false; 1234 } 1235 1236 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap) 1237 { 1238 } 1239 1240 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) 1241 { 1242 return false; 1243 } 1244 1245 static inline void pl011_dma_rx_irq(struct uart_amba_port *uap) 1246 { 1247 } 1248 1249 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) 1250 { 1251 } 1252 1253 static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) 1254 { 1255 return -EIO; 1256 } 1257 1258 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) 1259 { 1260 return false; 1261 } 1262 1263 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) 1264 { 1265 return false; 1266 } 1267 1268 #define pl011_dma_flush_buffer NULL 1269 #endif 1270 1271 static void pl011_rs485_tx_stop(struct uart_amba_port *uap) 1272 { 1273 struct uart_port *port = &uap->port; 1274 u32 cr; 1275 1276 if (uap->rs485_tx_state == SEND) 1277 uap->rs485_tx_state = WAIT_AFTER_SEND; 1278 1279 if (uap->rs485_tx_state == WAIT_AFTER_SEND) { 1280 /* Schedule hrtimer if tx queue not empty */ 1281 if (!pl011_tx_empty(port)) { 1282 hrtimer_start(&uap->trigger_stop_tx, 1283 uap->rs485_tx_drain_interval, 1284 HRTIMER_MODE_REL); 1285 return; 1286 } 1287 if (port->rs485.delay_rts_after_send > 0) { 1288 hrtimer_start(&uap->trigger_stop_tx, 1289 ms_to_ktime(port->rs485.delay_rts_after_send), 1290 HRTIMER_MODE_REL); 1291 return; 1292 } 1293 /* Continue without any delay */ 1294 } else if (uap->rs485_tx_state == WAIT_AFTER_RTS) { 1295 hrtimer_try_to_cancel(&uap->trigger_start_tx); 1296 } 1297 1298 cr = pl011_read(uap, REG_CR); 1299 1300 if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) 1301 cr &= ~UART011_CR_RTS; 1302 else 1303 cr |= UART011_CR_RTS; 1304 1305 /* Disable the transmitter and reenable the transceiver */ 1306 cr &= ~UART011_CR_TXE; 1307 cr |= UART011_CR_RXE; 1308 pl011_write(cr, uap, REG_CR); 1309 1310 uap->rs485_tx_state = OFF; 1311 } 1312 1313 static void pl011_stop_tx(struct uart_port *port) 1314 { 1315 struct uart_amba_port *uap = 1316 container_of(port, struct uart_amba_port, port); 1317 1318 if (port->rs485.flags & SER_RS485_ENABLED && 1319 uap->rs485_tx_state == WAIT_AFTER_RTS) { 1320 pl011_rs485_tx_stop(uap); 1321 return; 1322 } 1323 1324 uap->im &= ~UART011_TXIM; 1325 pl011_write(uap->im, uap, REG_IMSC); 1326 pl011_dma_tx_stop(uap); 1327 1328 if (port->rs485.flags & SER_RS485_ENABLED && 1329 uap->rs485_tx_state != OFF) 1330 pl011_rs485_tx_stop(uap); 1331 } 1332 1333 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq); 1334 1335 /* Start TX with programmed I/O only (no DMA) */ 1336 static void pl011_start_tx_pio(struct uart_amba_port *uap) 1337 { 1338 if (pl011_tx_chars(uap, false)) { 1339 uap->im |= UART011_TXIM; 1340 pl011_write(uap->im, uap, REG_IMSC); 1341 } 1342 } 1343 1344 static void pl011_rs485_tx_start(struct uart_amba_port *uap) 1345 { 1346 struct uart_port *port = &uap->port; 1347 u32 cr; 1348 1349 if (uap->rs485_tx_state == WAIT_AFTER_RTS) { 1350 uap->rs485_tx_state = SEND; 1351 return; 1352 } 1353 if (uap->rs485_tx_state == WAIT_AFTER_SEND) { 1354 hrtimer_try_to_cancel(&uap->trigger_stop_tx); 1355 uap->rs485_tx_state = SEND; 1356 return; 1357 } 1358 /* uap->rs485_tx_state == OFF */ 1359 /* Enable transmitter */ 1360 cr = pl011_read(uap, REG_CR); 1361 cr |= UART011_CR_TXE; 1362 /* Disable receiver if half-duplex */ 1363 if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) 1364 cr &= ~UART011_CR_RXE; 1365 1366 if (port->rs485.flags & SER_RS485_RTS_ON_SEND) 1367 cr &= ~UART011_CR_RTS; 1368 else 1369 cr |= UART011_CR_RTS; 1370 1371 pl011_write(cr, uap, REG_CR); 1372 1373 if (port->rs485.delay_rts_before_send > 0) { 1374 uap->rs485_tx_state = WAIT_AFTER_RTS; 1375 hrtimer_start(&uap->trigger_start_tx, 1376 ms_to_ktime(port->rs485.delay_rts_before_send), 1377 HRTIMER_MODE_REL); 1378 } else { 1379 uap->rs485_tx_state = SEND; 1380 } 1381 } 1382 1383 static void pl011_start_tx(struct uart_port *port) 1384 { 1385 struct uart_amba_port *uap = 1386 container_of(port, struct uart_amba_port, port); 1387 1388 if ((uap->port.rs485.flags & SER_RS485_ENABLED) && 1389 uap->rs485_tx_state != SEND) { 1390 pl011_rs485_tx_start(uap); 1391 if (uap->rs485_tx_state == WAIT_AFTER_RTS) 1392 return; 1393 } 1394 1395 if (!pl011_dma_tx_start(uap)) 1396 pl011_start_tx_pio(uap); 1397 } 1398 1399 static enum hrtimer_restart pl011_trigger_start_tx(struct hrtimer *t) 1400 { 1401 struct uart_amba_port *uap = 1402 container_of(t, struct uart_amba_port, trigger_start_tx); 1403 unsigned long flags; 1404 1405 uart_port_lock_irqsave(&uap->port, &flags); 1406 if (uap->rs485_tx_state == WAIT_AFTER_RTS) 1407 pl011_start_tx(&uap->port); 1408 uart_port_unlock_irqrestore(&uap->port, flags); 1409 1410 return HRTIMER_NORESTART; 1411 } 1412 1413 static enum hrtimer_restart pl011_trigger_stop_tx(struct hrtimer *t) 1414 { 1415 struct uart_amba_port *uap = 1416 container_of(t, struct uart_amba_port, trigger_stop_tx); 1417 unsigned long flags; 1418 1419 uart_port_lock_irqsave(&uap->port, &flags); 1420 if (uap->rs485_tx_state == WAIT_AFTER_SEND) 1421 pl011_rs485_tx_stop(uap); 1422 uart_port_unlock_irqrestore(&uap->port, flags); 1423 1424 return HRTIMER_NORESTART; 1425 } 1426 1427 static void pl011_stop_rx(struct uart_port *port) 1428 { 1429 struct uart_amba_port *uap = 1430 container_of(port, struct uart_amba_port, port); 1431 1432 uap->im &= ~(UART011_RXIM | UART011_RTIM | UART011_FEIM | 1433 UART011_PEIM | UART011_BEIM | UART011_OEIM); 1434 pl011_write(uap->im, uap, REG_IMSC); 1435 1436 pl011_dma_rx_stop(uap); 1437 } 1438 1439 static void pl011_throttle_rx(struct uart_port *port) 1440 { 1441 unsigned long flags; 1442 1443 uart_port_lock_irqsave(port, &flags); 1444 pl011_stop_rx(port); 1445 uart_port_unlock_irqrestore(port, flags); 1446 } 1447 1448 static void pl011_enable_ms(struct uart_port *port) 1449 { 1450 struct uart_amba_port *uap = 1451 container_of(port, struct uart_amba_port, port); 1452 1453 uap->im |= UART011_RIMIM | UART011_CTSMIM | UART011_DCDMIM | UART011_DSRMIM; 1454 pl011_write(uap->im, uap, REG_IMSC); 1455 } 1456 1457 static void pl011_rx_chars(struct uart_amba_port *uap) 1458 __releases(&uap->port.lock) 1459 __acquires(&uap->port.lock) 1460 { 1461 pl011_fifo_to_tty(uap); 1462 1463 uart_port_unlock(&uap->port); 1464 tty_flip_buffer_push(&uap->port.state->port); 1465 /* 1466 * If we were temporarily out of DMA mode for a while, 1467 * attempt to switch back to DMA mode again. 1468 */ 1469 if (pl011_dma_rx_available(uap)) { 1470 if (pl011_dma_rx_trigger_dma(uap)) { 1471 dev_dbg(uap->port.dev, 1472 "could not trigger RX DMA job fall back to interrupt mode again\n"); 1473 uap->im |= UART011_RXIM; 1474 pl011_write(uap->im, uap, REG_IMSC); 1475 } else { 1476 #ifdef CONFIG_DMA_ENGINE 1477 /* Start Rx DMA poll */ 1478 if (uap->dmarx.poll_rate) { 1479 uap->dmarx.last_jiffies = jiffies; 1480 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; 1481 mod_timer(&uap->dmarx.timer, 1482 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); 1483 } 1484 #endif 1485 } 1486 } 1487 uart_port_lock(&uap->port); 1488 } 1489 1490 static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c, 1491 bool from_irq) 1492 { 1493 if (unlikely(!from_irq) && 1494 pl011_read(uap, REG_FR) & UART01x_FR_TXFF) 1495 return false; /* unable to transmit character */ 1496 1497 pl011_write(c, uap, REG_DR); 1498 uap->port.icount.tx++; 1499 1500 return true; 1501 } 1502 1503 /* Returns true if tx interrupts have to be (kept) enabled */ 1504 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) 1505 { 1506 struct tty_port *tport = &uap->port.state->port; 1507 int count = uap->fifosize >> 1; 1508 1509 if (uap->port.x_char) { 1510 if (!pl011_tx_char(uap, uap->port.x_char, from_irq)) 1511 return true; 1512 uap->port.x_char = 0; 1513 --count; 1514 } 1515 if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(&uap->port)) { 1516 pl011_stop_tx(&uap->port); 1517 return false; 1518 } 1519 1520 /* If we are using DMA mode, try to send some characters. */ 1521 if (pl011_dma_tx_irq(uap)) 1522 return true; 1523 1524 while (1) { 1525 unsigned char c; 1526 1527 if (likely(from_irq) && count-- == 0) 1528 break; 1529 1530 if (!kfifo_peek(&tport->xmit_fifo, &c)) 1531 break; 1532 1533 if (!pl011_tx_char(uap, c, from_irq)) 1534 break; 1535 1536 kfifo_skip(&tport->xmit_fifo); 1537 } 1538 1539 if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) 1540 uart_write_wakeup(&uap->port); 1541 1542 if (kfifo_is_empty(&tport->xmit_fifo)) { 1543 pl011_stop_tx(&uap->port); 1544 return false; 1545 } 1546 return true; 1547 } 1548 1549 static void pl011_modem_status(struct uart_amba_port *uap) 1550 { 1551 unsigned int status, delta; 1552 1553 status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY; 1554 1555 delta = status ^ uap->old_status; 1556 uap->old_status = status; 1557 1558 if (!delta) 1559 return; 1560 1561 if (delta & UART01x_FR_DCD) 1562 uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD); 1563 1564 if (delta & uap->vendor->fr_dsr) 1565 uap->port.icount.dsr++; 1566 1567 if (delta & uap->vendor->fr_cts) 1568 uart_handle_cts_change(&uap->port, 1569 status & uap->vendor->fr_cts); 1570 1571 wake_up_interruptible(&uap->port.state->port.delta_msr_wait); 1572 } 1573 1574 static void check_apply_cts_event_workaround(struct uart_amba_port *uap) 1575 { 1576 if (!uap->vendor->cts_event_workaround) 1577 return; 1578 1579 /* workaround to make sure that all bits are unlocked.. */ 1580 pl011_write(0x00, uap, REG_ICR); 1581 1582 /* 1583 * WA: introduce 26ns(1 uart clk) delay before W1C; 1584 * single apb access will incur 2 pclk(133.12Mhz) delay, 1585 * so add 2 dummy reads 1586 */ 1587 pl011_read(uap, REG_ICR); 1588 pl011_read(uap, REG_ICR); 1589 } 1590 1591 static irqreturn_t pl011_int(int irq, void *dev_id) 1592 { 1593 struct uart_amba_port *uap = dev_id; 1594 unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT; 1595 int handled = 0; 1596 1597 uart_port_lock(&uap->port); 1598 status = pl011_read(uap, REG_RIS) & uap->im; 1599 if (status) { 1600 do { 1601 check_apply_cts_event_workaround(uap); 1602 1603 pl011_write(status & ~(UART011_TXIS | UART011_RTIS | UART011_RXIS), 1604 uap, REG_ICR); 1605 1606 if (status & (UART011_RTIS | UART011_RXIS)) { 1607 if (pl011_dma_rx_running(uap)) 1608 pl011_dma_rx_irq(uap); 1609 else 1610 pl011_rx_chars(uap); 1611 } 1612 if (status & (UART011_DSRMIS | UART011_DCDMIS | 1613 UART011_CTSMIS | UART011_RIMIS)) 1614 pl011_modem_status(uap); 1615 if (status & UART011_TXIS) 1616 pl011_tx_chars(uap, true); 1617 1618 if (pass_counter-- == 0) 1619 break; 1620 1621 status = pl011_read(uap, REG_RIS) & uap->im; 1622 } while (status != 0); 1623 handled = 1; 1624 } 1625 1626 uart_unlock_and_check_sysrq(&uap->port); 1627 1628 return IRQ_RETVAL(handled); 1629 } 1630 1631 static unsigned int pl011_tx_empty(struct uart_port *port) 1632 { 1633 struct uart_amba_port *uap = 1634 container_of(port, struct uart_amba_port, port); 1635 1636 /* Allow feature register bits to be inverted to work around errata */ 1637 unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr; 1638 1639 return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ? 1640 0 : TIOCSER_TEMT; 1641 } 1642 1643 static void pl011_maybe_set_bit(bool cond, unsigned int *ptr, unsigned int mask) 1644 { 1645 if (cond) 1646 *ptr |= mask; 1647 } 1648 1649 static unsigned int pl011_get_mctrl(struct uart_port *port) 1650 { 1651 struct uart_amba_port *uap = 1652 container_of(port, struct uart_amba_port, port); 1653 unsigned int result = 0; 1654 unsigned int status = pl011_read(uap, REG_FR); 1655 1656 pl011_maybe_set_bit(status & UART01x_FR_DCD, &result, TIOCM_CAR); 1657 pl011_maybe_set_bit(status & uap->vendor->fr_dsr, &result, TIOCM_DSR); 1658 pl011_maybe_set_bit(status & uap->vendor->fr_cts, &result, TIOCM_CTS); 1659 pl011_maybe_set_bit(status & uap->vendor->fr_ri, &result, TIOCM_RNG); 1660 1661 return result; 1662 } 1663 1664 static void pl011_assign_bit(bool cond, unsigned int *ptr, unsigned int mask) 1665 { 1666 if (cond) 1667 *ptr |= mask; 1668 else 1669 *ptr &= ~mask; 1670 } 1671 1672 static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl) 1673 { 1674 struct uart_amba_port *uap = 1675 container_of(port, struct uart_amba_port, port); 1676 unsigned int cr; 1677 1678 cr = pl011_read(uap, REG_CR); 1679 1680 pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTS); 1681 pl011_assign_bit(mctrl & TIOCM_DTR, &cr, UART011_CR_DTR); 1682 pl011_assign_bit(mctrl & TIOCM_OUT1, &cr, UART011_CR_OUT1); 1683 pl011_assign_bit(mctrl & TIOCM_OUT2, &cr, UART011_CR_OUT2); 1684 pl011_assign_bit(mctrl & TIOCM_LOOP, &cr, UART011_CR_LBE); 1685 1686 if (port->status & UPSTAT_AUTORTS) { 1687 /* We need to disable auto-RTS if we want to turn RTS off */ 1688 pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTSEN); 1689 } 1690 1691 pl011_write(cr, uap, REG_CR); 1692 } 1693 1694 static void pl011_break_ctl(struct uart_port *port, int break_state) 1695 { 1696 struct uart_amba_port *uap = 1697 container_of(port, struct uart_amba_port, port); 1698 unsigned long flags; 1699 unsigned int lcr_h; 1700 1701 uart_port_lock_irqsave(&uap->port, &flags); 1702 lcr_h = pl011_read(uap, REG_LCRH_TX); 1703 if (break_state == -1) 1704 lcr_h |= UART01x_LCRH_BRK; 1705 else 1706 lcr_h &= ~UART01x_LCRH_BRK; 1707 pl011_write(lcr_h, uap, REG_LCRH_TX); 1708 uart_port_unlock_irqrestore(&uap->port, flags); 1709 } 1710 1711 #ifdef CONFIG_CONSOLE_POLL 1712 1713 static void pl011_quiesce_irqs(struct uart_port *port) 1714 { 1715 struct uart_amba_port *uap = 1716 container_of(port, struct uart_amba_port, port); 1717 1718 pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR); 1719 /* 1720 * There is no way to clear TXIM as this is "ready to transmit IRQ", so 1721 * we simply mask it. start_tx() will unmask it. 1722 * 1723 * Note we can race with start_tx(), and if the race happens, the 1724 * polling user might get another interrupt just after we clear it. 1725 * But it should be OK and can happen even w/o the race, e.g. 1726 * controller immediately got some new data and raised the IRQ. 1727 * 1728 * And whoever uses polling routines assumes that it manages the device 1729 * (including tx queue), so we're also fine with start_tx()'s caller 1730 * side. 1731 */ 1732 pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap, 1733 REG_IMSC); 1734 } 1735 1736 static int pl011_get_poll_char(struct uart_port *port) 1737 { 1738 struct uart_amba_port *uap = 1739 container_of(port, struct uart_amba_port, port); 1740 unsigned int status; 1741 1742 /* 1743 * The caller might need IRQs lowered, e.g. if used with KDB NMI 1744 * debugger. 1745 */ 1746 pl011_quiesce_irqs(port); 1747 1748 status = pl011_read(uap, REG_FR); 1749 if (status & UART01x_FR_RXFE) 1750 return NO_POLL_CHAR; 1751 1752 return pl011_read(uap, REG_DR); 1753 } 1754 1755 static void pl011_put_poll_char(struct uart_port *port, unsigned char ch) 1756 { 1757 struct uart_amba_port *uap = 1758 container_of(port, struct uart_amba_port, port); 1759 1760 while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) 1761 cpu_relax(); 1762 1763 pl011_write(ch, uap, REG_DR); 1764 } 1765 1766 #endif /* CONFIG_CONSOLE_POLL */ 1767 1768 static int pl011_hwinit(struct uart_port *port) 1769 { 1770 struct uart_amba_port *uap = 1771 container_of(port, struct uart_amba_port, port); 1772 int retval; 1773 1774 /* Optionaly enable pins to be muxed in and configured */ 1775 pinctrl_pm_select_default_state(port->dev); 1776 1777 /* 1778 * Try to enable the clock producer. 1779 */ 1780 retval = clk_prepare_enable(uap->clk); 1781 if (retval) 1782 return retval; 1783 1784 uap->port.uartclk = clk_get_rate(uap->clk); 1785 1786 /* Clear pending error and receive interrupts */ 1787 pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS | 1788 UART011_FEIS | UART011_RTIS | UART011_RXIS, 1789 uap, REG_ICR); 1790 1791 /* 1792 * Save interrupts enable mask, and enable RX interrupts in case if 1793 * the interrupt is used for NMI entry. 1794 */ 1795 uap->im = pl011_read(uap, REG_IMSC); 1796 pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC); 1797 1798 if (dev_get_platdata(uap->port.dev)) { 1799 struct amba_pl011_data *plat; 1800 1801 plat = dev_get_platdata(uap->port.dev); 1802 if (plat->init) 1803 plat->init(); 1804 } 1805 return 0; 1806 } 1807 1808 static bool pl011_split_lcrh(const struct uart_amba_port *uap) 1809 { 1810 return pl011_reg_to_offset(uap, REG_LCRH_RX) != 1811 pl011_reg_to_offset(uap, REG_LCRH_TX); 1812 } 1813 1814 static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h) 1815 { 1816 pl011_write(lcr_h, uap, REG_LCRH_RX); 1817 if (pl011_split_lcrh(uap)) { 1818 int i; 1819 /* 1820 * Wait 10 PCLKs before writing LCRH_TX register, 1821 * to get this delay write read only register 10 times 1822 */ 1823 for (i = 0; i < 10; ++i) 1824 pl011_write(0xff, uap, REG_MIS); 1825 pl011_write(lcr_h, uap, REG_LCRH_TX); 1826 } 1827 } 1828 1829 static int pl011_allocate_irq(struct uart_amba_port *uap) 1830 { 1831 pl011_write(uap->im, uap, REG_IMSC); 1832 1833 return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap); 1834 } 1835 1836 /* 1837 * Enable interrupts, only timeouts when using DMA 1838 * if initial RX DMA job failed, start in interrupt mode 1839 * as well. 1840 */ 1841 static void pl011_enable_interrupts(struct uart_amba_port *uap) 1842 { 1843 unsigned long flags; 1844 unsigned int i; 1845 1846 uart_port_lock_irqsave(&uap->port, &flags); 1847 1848 /* Clear out any spuriously appearing RX interrupts */ 1849 pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); 1850 1851 /* 1852 * RXIS is asserted only when the RX FIFO transitions from below 1853 * to above the trigger threshold. If the RX FIFO is already 1854 * full to the threshold this can't happen and RXIS will now be 1855 * stuck off. Drain the RX FIFO explicitly to fix this: 1856 */ 1857 for (i = 0; i < uap->fifosize * 2; ++i) { 1858 if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE) 1859 break; 1860 1861 pl011_read(uap, REG_DR); 1862 } 1863 1864 uap->im = UART011_RTIM; 1865 if (!pl011_dma_rx_running(uap)) 1866 uap->im |= UART011_RXIM; 1867 pl011_write(uap->im, uap, REG_IMSC); 1868 uart_port_unlock_irqrestore(&uap->port, flags); 1869 } 1870 1871 static void pl011_unthrottle_rx(struct uart_port *port) 1872 { 1873 struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); 1874 unsigned long flags; 1875 1876 uart_port_lock_irqsave(&uap->port, &flags); 1877 1878 uap->im = UART011_RTIM; 1879 if (!pl011_dma_rx_running(uap)) 1880 uap->im |= UART011_RXIM; 1881 1882 pl011_write(uap->im, uap, REG_IMSC); 1883 1884 #ifdef CONFIG_DMA_ENGINE 1885 if (uap->using_rx_dma) { 1886 uap->dmacr |= UART011_RXDMAE; 1887 pl011_write(uap->dmacr, uap, REG_DMACR); 1888 } 1889 #endif 1890 1891 uart_port_unlock_irqrestore(&uap->port, flags); 1892 } 1893 1894 static int pl011_startup(struct uart_port *port) 1895 { 1896 struct uart_amba_port *uap = 1897 container_of(port, struct uart_amba_port, port); 1898 unsigned int cr; 1899 int retval; 1900 1901 retval = pl011_hwinit(port); 1902 if (retval) 1903 goto clk_dis; 1904 1905 retval = pl011_allocate_irq(uap); 1906 if (retval) 1907 goto clk_dis; 1908 1909 pl011_write(uap->vendor->ifls, uap, REG_IFLS); 1910 1911 uart_port_lock_irq(&uap->port); 1912 1913 cr = pl011_read(uap, REG_CR); 1914 cr &= UART011_CR_RTS | UART011_CR_DTR; 1915 cr |= UART01x_CR_UARTEN | UART011_CR_RXE; 1916 1917 if (!(port->rs485.flags & SER_RS485_ENABLED)) 1918 cr |= UART011_CR_TXE; 1919 1920 pl011_write(cr, uap, REG_CR); 1921 1922 uart_port_unlock_irq(&uap->port); 1923 1924 /* 1925 * initialise the old status of the modem signals 1926 */ 1927 uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY; 1928 1929 /* Startup DMA */ 1930 pl011_dma_startup(uap); 1931 1932 pl011_enable_interrupts(uap); 1933 1934 return 0; 1935 1936 clk_dis: 1937 clk_disable_unprepare(uap->clk); 1938 return retval; 1939 } 1940 1941 static int sbsa_uart_startup(struct uart_port *port) 1942 { 1943 struct uart_amba_port *uap = 1944 container_of(port, struct uart_amba_port, port); 1945 int retval; 1946 1947 retval = pl011_hwinit(port); 1948 if (retval) 1949 return retval; 1950 1951 retval = pl011_allocate_irq(uap); 1952 if (retval) 1953 return retval; 1954 1955 /* The SBSA UART does not support any modem status lines. */ 1956 uap->old_status = 0; 1957 1958 pl011_enable_interrupts(uap); 1959 1960 return 0; 1961 } 1962 1963 static void pl011_shutdown_channel(struct uart_amba_port *uap, unsigned int lcrh) 1964 { 1965 unsigned long val; 1966 1967 val = pl011_read(uap, lcrh); 1968 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN); 1969 pl011_write(val, uap, lcrh); 1970 } 1971 1972 /* 1973 * disable the port. It should not disable RTS and DTR. 1974 * Also RTS and DTR state should be preserved to restore 1975 * it during startup(). 1976 */ 1977 static void pl011_disable_uart(struct uart_amba_port *uap) 1978 { 1979 unsigned int cr; 1980 1981 uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); 1982 uart_port_lock_irq(&uap->port); 1983 cr = pl011_read(uap, REG_CR); 1984 cr &= UART011_CR_RTS | UART011_CR_DTR; 1985 cr |= UART01x_CR_UARTEN | UART011_CR_TXE; 1986 pl011_write(cr, uap, REG_CR); 1987 uart_port_unlock_irq(&uap->port); 1988 1989 /* 1990 * disable break condition and fifos 1991 */ 1992 pl011_shutdown_channel(uap, REG_LCRH_RX); 1993 if (pl011_split_lcrh(uap)) 1994 pl011_shutdown_channel(uap, REG_LCRH_TX); 1995 } 1996 1997 static void pl011_disable_interrupts(struct uart_amba_port *uap) 1998 { 1999 uart_port_lock_irq(&uap->port); 2000 2001 /* mask all interrupts and clear all pending ones */ 2002 uap->im = 0; 2003 pl011_write(uap->im, uap, REG_IMSC); 2004 pl011_write(0xffff, uap, REG_ICR); 2005 2006 uart_port_unlock_irq(&uap->port); 2007 } 2008 2009 static void pl011_shutdown(struct uart_port *port) 2010 { 2011 struct uart_amba_port *uap = 2012 container_of(port, struct uart_amba_port, port); 2013 2014 pl011_disable_interrupts(uap); 2015 2016 pl011_dma_shutdown(uap); 2017 2018 if ((port->rs485.flags & SER_RS485_ENABLED && uap->rs485_tx_state != OFF)) 2019 pl011_rs485_tx_stop(uap); 2020 2021 free_irq(uap->port.irq, uap); 2022 2023 pl011_disable_uart(uap); 2024 2025 /* 2026 * Shut down the clock producer 2027 */ 2028 clk_disable_unprepare(uap->clk); 2029 /* Optionally let pins go into sleep states */ 2030 pinctrl_pm_select_sleep_state(port->dev); 2031 2032 if (dev_get_platdata(uap->port.dev)) { 2033 struct amba_pl011_data *plat; 2034 2035 plat = dev_get_platdata(uap->port.dev); 2036 if (plat->exit) 2037 plat->exit(); 2038 } 2039 2040 if (uap->port.ops->flush_buffer) 2041 uap->port.ops->flush_buffer(port); 2042 } 2043 2044 static void sbsa_uart_shutdown(struct uart_port *port) 2045 { 2046 struct uart_amba_port *uap = 2047 container_of(port, struct uart_amba_port, port); 2048 2049 pl011_disable_interrupts(uap); 2050 2051 free_irq(uap->port.irq, uap); 2052 2053 if (uap->port.ops->flush_buffer) 2054 uap->port.ops->flush_buffer(port); 2055 } 2056 2057 static void 2058 pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios) 2059 { 2060 port->read_status_mask = UART011_DR_OE | 255; 2061 if (termios->c_iflag & INPCK) 2062 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE; 2063 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 2064 port->read_status_mask |= UART011_DR_BE; 2065 2066 /* 2067 * Characters to ignore 2068 */ 2069 port->ignore_status_mask = 0; 2070 if (termios->c_iflag & IGNPAR) 2071 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE; 2072 if (termios->c_iflag & IGNBRK) { 2073 port->ignore_status_mask |= UART011_DR_BE; 2074 /* 2075 * If we're ignoring parity and break indicators, 2076 * ignore overruns too (for real raw support). 2077 */ 2078 if (termios->c_iflag & IGNPAR) 2079 port->ignore_status_mask |= UART011_DR_OE; 2080 } 2081 2082 /* 2083 * Ignore all characters if CREAD is not set. 2084 */ 2085 if ((termios->c_cflag & CREAD) == 0) 2086 port->ignore_status_mask |= UART_DUMMY_DR_RX; 2087 } 2088 2089 static void 2090 pl011_set_termios(struct uart_port *port, struct ktermios *termios, 2091 const struct ktermios *old) 2092 { 2093 struct uart_amba_port *uap = 2094 container_of(port, struct uart_amba_port, port); 2095 unsigned int lcr_h, old_cr; 2096 unsigned long flags; 2097 unsigned int baud, quot, clkdiv; 2098 unsigned int bits; 2099 2100 if (uap->vendor->oversampling) 2101 clkdiv = 8; 2102 else 2103 clkdiv = 16; 2104 2105 /* 2106 * Ask the core to calculate the divisor for us. 2107 */ 2108 baud = uart_get_baud_rate(port, termios, old, 0, 2109 port->uartclk / clkdiv); 2110 #ifdef CONFIG_DMA_ENGINE 2111 /* 2112 * Adjust RX DMA polling rate with baud rate if not specified. 2113 */ 2114 if (uap->dmarx.auto_poll_rate) 2115 uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud); 2116 #endif 2117 2118 if (baud > port->uartclk / 16) 2119 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); 2120 else 2121 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); 2122 2123 switch (termios->c_cflag & CSIZE) { 2124 case CS5: 2125 lcr_h = UART01x_LCRH_WLEN_5; 2126 break; 2127 case CS6: 2128 lcr_h = UART01x_LCRH_WLEN_6; 2129 break; 2130 case CS7: 2131 lcr_h = UART01x_LCRH_WLEN_7; 2132 break; 2133 default: // CS8 2134 lcr_h = UART01x_LCRH_WLEN_8; 2135 break; 2136 } 2137 if (termios->c_cflag & CSTOPB) 2138 lcr_h |= UART01x_LCRH_STP2; 2139 if (termios->c_cflag & PARENB) { 2140 lcr_h |= UART01x_LCRH_PEN; 2141 if (!(termios->c_cflag & PARODD)) 2142 lcr_h |= UART01x_LCRH_EPS; 2143 if (termios->c_cflag & CMSPAR) 2144 lcr_h |= UART011_LCRH_SPS; 2145 } 2146 if (uap->fifosize > 1) 2147 lcr_h |= UART01x_LCRH_FEN; 2148 2149 bits = tty_get_frame_size(termios->c_cflag); 2150 2151 uart_port_lock_irqsave(port, &flags); 2152 2153 /* 2154 * Update the per-port timeout. 2155 */ 2156 uart_update_timeout(port, termios->c_cflag, baud); 2157 2158 /* 2159 * Calculate the approximated time it takes to transmit one character 2160 * with the given baud rate. We use this as the poll interval when we 2161 * wait for the tx queue to empty. 2162 */ 2163 uap->rs485_tx_drain_interval = ns_to_ktime(DIV_ROUND_UP(bits * NSEC_PER_SEC, baud)); 2164 2165 pl011_setup_status_masks(port, termios); 2166 2167 if (UART_ENABLE_MS(port, termios->c_cflag)) 2168 pl011_enable_ms(port); 2169 2170 if (port->rs485.flags & SER_RS485_ENABLED) 2171 termios->c_cflag &= ~CRTSCTS; 2172 2173 old_cr = pl011_read(uap, REG_CR); 2174 2175 if (termios->c_cflag & CRTSCTS) { 2176 if (old_cr & UART011_CR_RTS) 2177 old_cr |= UART011_CR_RTSEN; 2178 2179 old_cr |= UART011_CR_CTSEN; 2180 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; 2181 } else { 2182 old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN); 2183 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); 2184 } 2185 2186 if (uap->vendor->oversampling) { 2187 if (baud > port->uartclk / 16) 2188 old_cr |= ST_UART011_CR_OVSFACT; 2189 else 2190 old_cr &= ~ST_UART011_CR_OVSFACT; 2191 } 2192 2193 /* 2194 * Workaround for the ST Micro oversampling variants to 2195 * increase the bitrate slightly, by lowering the divisor, 2196 * to avoid delayed sampling of start bit at high speeds, 2197 * else we see data corruption. 2198 */ 2199 if (uap->vendor->oversampling) { 2200 if (baud >= 3000000 && baud < 3250000 && quot > 1) 2201 quot -= 1; 2202 else if (baud > 3250000 && quot > 2) 2203 quot -= 2; 2204 } 2205 /* Set baud rate */ 2206 pl011_write(quot & 0x3f, uap, REG_FBRD); 2207 pl011_write(quot >> 6, uap, REG_IBRD); 2208 2209 /* 2210 * ----------v----------v----------v----------v----- 2211 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER 2212 * REG_FBRD & REG_IBRD. 2213 * ----------^----------^----------^----------^----- 2214 */ 2215 pl011_write_lcr_h(uap, lcr_h); 2216 2217 /* 2218 * Receive was disabled by pl011_disable_uart during shutdown. 2219 * Need to reenable receive if you need to use a tty_driver 2220 * returns from tty_find_polling_driver() after a port shutdown. 2221 */ 2222 old_cr |= UART011_CR_RXE; 2223 pl011_write(old_cr, uap, REG_CR); 2224 2225 uart_port_unlock_irqrestore(port, flags); 2226 } 2227 2228 static void 2229 sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios, 2230 const struct ktermios *old) 2231 { 2232 struct uart_amba_port *uap = 2233 container_of(port, struct uart_amba_port, port); 2234 unsigned long flags; 2235 2236 tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud); 2237 2238 /* The SBSA UART only supports 8n1 without hardware flow control. */ 2239 termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD); 2240 termios->c_cflag &= ~(CMSPAR | CRTSCTS); 2241 termios->c_cflag |= CS8 | CLOCAL; 2242 2243 uart_port_lock_irqsave(port, &flags); 2244 uart_update_timeout(port, CS8, uap->fixed_baud); 2245 pl011_setup_status_masks(port, termios); 2246 uart_port_unlock_irqrestore(port, flags); 2247 } 2248 2249 static const char *pl011_type(struct uart_port *port) 2250 { 2251 struct uart_amba_port *uap = 2252 container_of(port, struct uart_amba_port, port); 2253 return uap->port.type == PORT_AMBA ? uap->type : NULL; 2254 } 2255 2256 /* 2257 * Configure/autoconfigure the port. 2258 */ 2259 static void pl011_config_port(struct uart_port *port, int flags) 2260 { 2261 if (flags & UART_CONFIG_TYPE) 2262 port->type = PORT_AMBA; 2263 } 2264 2265 /* 2266 * verify the new serial_struct (for TIOCSSERIAL). 2267 */ 2268 static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser) 2269 { 2270 int ret = 0; 2271 2272 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA) 2273 ret = -EINVAL; 2274 if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs()) 2275 ret = -EINVAL; 2276 if (ser->baud_base < 9600) 2277 ret = -EINVAL; 2278 if (port->mapbase != (unsigned long)ser->iomem_base) 2279 ret = -EINVAL; 2280 return ret; 2281 } 2282 2283 static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios, 2284 struct serial_rs485 *rs485) 2285 { 2286 struct uart_amba_port *uap = 2287 container_of(port, struct uart_amba_port, port); 2288 2289 if (port->rs485.flags & SER_RS485_ENABLED) 2290 pl011_rs485_tx_stop(uap); 2291 2292 /* Make sure auto RTS is disabled */ 2293 if (rs485->flags & SER_RS485_ENABLED) { 2294 u32 cr = pl011_read(uap, REG_CR); 2295 2296 cr &= ~UART011_CR_RTSEN; 2297 pl011_write(cr, uap, REG_CR); 2298 port->status &= ~UPSTAT_AUTORTS; 2299 } 2300 2301 return 0; 2302 } 2303 2304 static const struct uart_ops amba_pl011_pops = { 2305 .tx_empty = pl011_tx_empty, 2306 .set_mctrl = pl011_set_mctrl, 2307 .get_mctrl = pl011_get_mctrl, 2308 .stop_tx = pl011_stop_tx, 2309 .start_tx = pl011_start_tx, 2310 .stop_rx = pl011_stop_rx, 2311 .throttle = pl011_throttle_rx, 2312 .unthrottle = pl011_unthrottle_rx, 2313 .enable_ms = pl011_enable_ms, 2314 .break_ctl = pl011_break_ctl, 2315 .startup = pl011_startup, 2316 .shutdown = pl011_shutdown, 2317 .flush_buffer = pl011_dma_flush_buffer, 2318 .set_termios = pl011_set_termios, 2319 .type = pl011_type, 2320 .config_port = pl011_config_port, 2321 .verify_port = pl011_verify_port, 2322 #ifdef CONFIG_CONSOLE_POLL 2323 .poll_init = pl011_hwinit, 2324 .poll_get_char = pl011_get_poll_char, 2325 .poll_put_char = pl011_put_poll_char, 2326 #endif 2327 }; 2328 2329 static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) 2330 { 2331 } 2332 2333 static unsigned int sbsa_uart_get_mctrl(struct uart_port *port) 2334 { 2335 return 0; 2336 } 2337 2338 static const struct uart_ops sbsa_uart_pops = { 2339 .tx_empty = pl011_tx_empty, 2340 .set_mctrl = sbsa_uart_set_mctrl, 2341 .get_mctrl = sbsa_uart_get_mctrl, 2342 .stop_tx = pl011_stop_tx, 2343 .start_tx = pl011_start_tx, 2344 .stop_rx = pl011_stop_rx, 2345 .startup = sbsa_uart_startup, 2346 .shutdown = sbsa_uart_shutdown, 2347 .set_termios = sbsa_uart_set_termios, 2348 .type = pl011_type, 2349 .config_port = pl011_config_port, 2350 .verify_port = pl011_verify_port, 2351 #ifdef CONFIG_CONSOLE_POLL 2352 .poll_init = pl011_hwinit, 2353 .poll_get_char = pl011_get_poll_char, 2354 .poll_put_char = pl011_put_poll_char, 2355 #endif 2356 }; 2357 2358 static struct uart_amba_port *amba_ports[UART_NR]; 2359 2360 #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE 2361 2362 static void pl011_console_putchar(struct uart_port *port, unsigned char ch) 2363 { 2364 struct uart_amba_port *uap = 2365 container_of(port, struct uart_amba_port, port); 2366 2367 while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) 2368 cpu_relax(); 2369 pl011_write(ch, uap, REG_DR); 2370 uap->console_line_ended = (ch == '\n'); 2371 } 2372 2373 static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, 2374 int *parity, int *bits) 2375 { 2376 unsigned int lcr_h, ibrd, fbrd; 2377 2378 if (!(pl011_read(uap, REG_CR) & UART01x_CR_UARTEN)) 2379 return; 2380 2381 lcr_h = pl011_read(uap, REG_LCRH_TX); 2382 2383 *parity = 'n'; 2384 if (lcr_h & UART01x_LCRH_PEN) { 2385 if (lcr_h & UART01x_LCRH_EPS) 2386 *parity = 'e'; 2387 else 2388 *parity = 'o'; 2389 } 2390 2391 if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7) 2392 *bits = 7; 2393 else 2394 *bits = 8; 2395 2396 ibrd = pl011_read(uap, REG_IBRD); 2397 fbrd = pl011_read(uap, REG_FBRD); 2398 2399 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd); 2400 2401 if (uap->vendor->oversampling && 2402 (pl011_read(uap, REG_CR) & ST_UART011_CR_OVSFACT)) 2403 *baud *= 2; 2404 } 2405 2406 static int pl011_console_setup(struct console *co, char *options) 2407 { 2408 struct uart_amba_port *uap; 2409 int baud = 38400; 2410 int bits = 8; 2411 int parity = 'n'; 2412 int flow = 'n'; 2413 int ret; 2414 2415 /* 2416 * Check whether an invalid uart number has been specified, and 2417 * if so, search for the first available port that does have 2418 * console support. 2419 */ 2420 if (co->index >= UART_NR) 2421 co->index = 0; 2422 uap = amba_ports[co->index]; 2423 if (!uap) 2424 return -ENODEV; 2425 2426 /* Allow pins to be muxed in and configured */ 2427 pinctrl_pm_select_default_state(uap->port.dev); 2428 2429 ret = clk_prepare(uap->clk); 2430 if (ret) 2431 return ret; 2432 2433 uap->console_line_ended = true; 2434 2435 if (dev_get_platdata(uap->port.dev)) { 2436 struct amba_pl011_data *plat; 2437 2438 plat = dev_get_platdata(uap->port.dev); 2439 if (plat->init) 2440 plat->init(); 2441 } 2442 2443 uap->port.uartclk = clk_get_rate(uap->clk); 2444 2445 if (uap->vendor->fixed_options) { 2446 baud = uap->fixed_baud; 2447 } else { 2448 if (options) 2449 uart_parse_options(options, 2450 &baud, &parity, &bits, &flow); 2451 else 2452 pl011_console_get_options(uap, &baud, &parity, &bits); 2453 } 2454 2455 return uart_set_options(&uap->port, co, baud, parity, bits, flow); 2456 } 2457 2458 /** 2459 * pl011_console_match - non-standard console matching 2460 * @co: registering console 2461 * @name: name from console command line 2462 * @idx: index from console command line 2463 * @options: ptr to option string from console command line 2464 * 2465 * Only attempts to match console command lines of the form: 2466 * console=pl011,mmio|mmio32,<addr>[,<options>] 2467 * console=pl011,0x<addr>[,<options>] 2468 * This form is used to register an initial earlycon boot console and 2469 * replace it with the amba_console at pl011 driver init. 2470 * 2471 * Performs console setup for a match (as required by interface) 2472 * If no <options> are specified, then assume the h/w is already setup. 2473 * 2474 * Returns 0 if console matches; otherwise non-zero to use default matching 2475 */ 2476 static int pl011_console_match(struct console *co, char *name, int idx, 2477 char *options) 2478 { 2479 unsigned char iotype; 2480 resource_size_t addr; 2481 int i; 2482 2483 /* 2484 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum 2485 * have a distinct console name, so make sure we check for that. 2486 * The actual implementation of the erratum occurs in the probe 2487 * function. 2488 */ 2489 if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0)) 2490 return -ENODEV; 2491 2492 if (uart_parse_earlycon(options, &iotype, &addr, &options)) 2493 return -ENODEV; 2494 2495 if (iotype != UPIO_MEM && iotype != UPIO_MEM32) 2496 return -ENODEV; 2497 2498 /* try to match the port specified on the command line */ 2499 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) { 2500 struct uart_port *port; 2501 2502 if (!amba_ports[i]) 2503 continue; 2504 2505 port = &amba_ports[i]->port; 2506 2507 if (port->mapbase != addr) 2508 continue; 2509 2510 co->index = i; 2511 uart_port_set_cons(port, co); 2512 return pl011_console_setup(co, options); 2513 } 2514 2515 return -ENODEV; 2516 } 2517 2518 static void 2519 pl011_console_write_atomic(struct console *co, struct nbcon_write_context *wctxt) 2520 { 2521 struct uart_amba_port *uap = amba_ports[co->index]; 2522 unsigned int old_cr = 0; 2523 2524 if (!nbcon_enter_unsafe(wctxt)) 2525 return; 2526 2527 clk_enable(uap->clk); 2528 2529 if (!uap->vendor->always_enabled) { 2530 old_cr = pl011_read(uap, REG_CR); 2531 pl011_write((old_cr & ~UART011_CR_CTSEN) | (UART01x_CR_UARTEN | UART011_CR_TXE), 2532 uap, REG_CR); 2533 } 2534 2535 if (!uap->console_line_ended) 2536 uart_console_write(&uap->port, "\n", 1, pl011_console_putchar); 2537 uart_console_write(&uap->port, wctxt->outbuf, wctxt->len, pl011_console_putchar); 2538 2539 while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy) 2540 cpu_relax(); 2541 2542 if (!uap->vendor->always_enabled) 2543 pl011_write(old_cr, uap, REG_CR); 2544 2545 clk_disable(uap->clk); 2546 2547 nbcon_exit_unsafe(wctxt); 2548 } 2549 2550 static void 2551 pl011_console_write_thread(struct console *co, struct nbcon_write_context *wctxt) 2552 { 2553 struct uart_amba_port *uap = amba_ports[co->index]; 2554 unsigned int old_cr = 0; 2555 2556 if (!nbcon_enter_unsafe(wctxt)) 2557 return; 2558 2559 clk_enable(uap->clk); 2560 2561 if (!uap->vendor->always_enabled) { 2562 old_cr = pl011_read(uap, REG_CR); 2563 pl011_write((old_cr & ~UART011_CR_CTSEN) | (UART01x_CR_UARTEN | UART011_CR_TXE), 2564 uap, REG_CR); 2565 } 2566 2567 if (nbcon_exit_unsafe(wctxt)) { 2568 int i; 2569 unsigned int len = READ_ONCE(wctxt->len); 2570 2571 for (i = 0; i < len; i++) { 2572 if (!nbcon_enter_unsafe(wctxt)) 2573 break; 2574 uart_console_write(&uap->port, wctxt->outbuf + i, 1, pl011_console_putchar); 2575 if (!nbcon_exit_unsafe(wctxt)) 2576 break; 2577 } 2578 } 2579 2580 while (!nbcon_enter_unsafe(wctxt)) 2581 nbcon_reacquire_nobuf(wctxt); 2582 2583 while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy) 2584 cpu_relax(); 2585 2586 if (!uap->vendor->always_enabled) 2587 pl011_write(old_cr, uap, REG_CR); 2588 2589 clk_disable(uap->clk); 2590 2591 nbcon_exit_unsafe(wctxt); 2592 } 2593 2594 static void 2595 pl011_console_device_lock(struct console *co, unsigned long *flags) 2596 { 2597 __uart_port_lock_irqsave(&amba_ports[co->index]->port, flags); 2598 } 2599 2600 static void 2601 pl011_console_device_unlock(struct console *co, unsigned long flags) 2602 { 2603 __uart_port_unlock_irqrestore(&amba_ports[co->index]->port, flags); 2604 } 2605 2606 static struct uart_driver amba_reg; 2607 static struct console amba_console = { 2608 .name = "ttyAMA", 2609 .device = uart_console_device, 2610 .setup = pl011_console_setup, 2611 .match = pl011_console_match, 2612 .write_atomic = pl011_console_write_atomic, 2613 .write_thread = pl011_console_write_thread, 2614 .device_lock = pl011_console_device_lock, 2615 .device_unlock = pl011_console_device_unlock, 2616 .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON, 2617 .index = -1, 2618 .data = &amba_reg, 2619 }; 2620 2621 #define AMBA_CONSOLE (&amba_console) 2622 2623 static void qdf2400_e44_putc(struct uart_port *port, unsigned char c) 2624 { 2625 while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) 2626 cpu_relax(); 2627 writel(c, port->membase + UART01x_DR); 2628 while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE)) 2629 cpu_relax(); 2630 } 2631 2632 static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned int n) 2633 { 2634 struct earlycon_device *dev = con->data; 2635 2636 uart_console_write(&dev->port, s, n, qdf2400_e44_putc); 2637 } 2638 2639 static void pl011_putc(struct uart_port *port, unsigned char c) 2640 { 2641 while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) 2642 cpu_relax(); 2643 if (port->iotype == UPIO_MEM32) 2644 writel(c, port->membase + UART01x_DR); 2645 else 2646 writeb(c, port->membase + UART01x_DR); 2647 while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY) 2648 cpu_relax(); 2649 } 2650 2651 static void pl011_early_write(struct console *con, const char *s, unsigned int n) 2652 { 2653 struct earlycon_device *dev = con->data; 2654 2655 uart_console_write(&dev->port, s, n, pl011_putc); 2656 } 2657 2658 #ifdef CONFIG_CONSOLE_POLL 2659 static int pl011_getc(struct uart_port *port) 2660 { 2661 if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE) 2662 return NO_POLL_CHAR; 2663 2664 if (port->iotype == UPIO_MEM32) 2665 return readl(port->membase + UART01x_DR); 2666 else 2667 return readb(port->membase + UART01x_DR); 2668 } 2669 2670 static int pl011_early_read(struct console *con, char *s, unsigned int n) 2671 { 2672 struct earlycon_device *dev = con->data; 2673 int ch, num_read = 0; 2674 2675 while (num_read < n) { 2676 ch = pl011_getc(&dev->port); 2677 if (ch == NO_POLL_CHAR) 2678 break; 2679 2680 s[num_read++] = ch; 2681 } 2682 2683 return num_read; 2684 } 2685 #else 2686 #define pl011_early_read NULL 2687 #endif 2688 2689 /* 2690 * On non-ACPI systems, earlycon is enabled by specifying 2691 * "earlycon=pl011,<address>" on the kernel command line. 2692 * 2693 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table, 2694 * by specifying only "earlycon" on the command line. Because it requires 2695 * SPCR, the console starts after ACPI is parsed, which is later than a 2696 * traditional early console. 2697 * 2698 * To get the traditional early console that starts before ACPI is parsed, 2699 * specify the full "earlycon=pl011,<address>" option. 2700 */ 2701 static int __init pl011_early_console_setup(struct earlycon_device *device, 2702 const char *opt) 2703 { 2704 if (!device->port.membase) 2705 return -ENODEV; 2706 2707 device->con->write = pl011_early_write; 2708 device->con->read = pl011_early_read; 2709 2710 return 0; 2711 } 2712 2713 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); 2714 2715 OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); 2716 2717 /* 2718 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by 2719 * Erratum 44, traditional earlycon can be enabled by specifying 2720 * "earlycon=qdf2400_e44,<address>". Any options are ignored. 2721 * 2722 * Alternatively, you can just specify "earlycon", and the early console 2723 * will be enabled with the information from the SPCR table. In this 2724 * case, the SPCR code will detect the need for the E44 work-around, 2725 * and set the console name to "qdf2400_e44". 2726 */ 2727 static int __init 2728 qdf2400_e44_early_console_setup(struct earlycon_device *device, 2729 const char *opt) 2730 { 2731 if (!device->port.membase) 2732 return -ENODEV; 2733 2734 device->con->write = qdf2400_e44_early_write; 2735 return 0; 2736 } 2737 2738 EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup); 2739 2740 #else 2741 #define AMBA_CONSOLE NULL 2742 #endif 2743 2744 static struct uart_driver amba_reg = { 2745 .owner = THIS_MODULE, 2746 .driver_name = "ttyAMA", 2747 .dev_name = "ttyAMA", 2748 .major = SERIAL_AMBA_MAJOR, 2749 .minor = SERIAL_AMBA_MINOR, 2750 .nr = UART_NR, 2751 .cons = AMBA_CONSOLE, 2752 }; 2753 2754 static int pl011_probe_dt_alias(int index, struct device *dev) 2755 { 2756 struct device_node *np; 2757 static bool seen_dev_with_alias; 2758 static bool seen_dev_without_alias; 2759 int ret = index; 2760 2761 if (!IS_ENABLED(CONFIG_OF)) 2762 return ret; 2763 2764 np = dev->of_node; 2765 if (!np) 2766 return ret; 2767 2768 ret = of_alias_get_id(np, "serial"); 2769 if (ret < 0) { 2770 seen_dev_without_alias = true; 2771 ret = index; 2772 } else { 2773 seen_dev_with_alias = true; 2774 if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret]) { 2775 dev_warn(dev, "requested serial port %d not available.\n", ret); 2776 ret = index; 2777 } 2778 } 2779 2780 if (seen_dev_with_alias && seen_dev_without_alias) 2781 dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n"); 2782 2783 return ret; 2784 } 2785 2786 /* unregisters the driver also if no more ports are left */ 2787 static void pl011_unregister_port(struct uart_amba_port *uap) 2788 { 2789 int i; 2790 bool busy = false; 2791 2792 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) { 2793 if (amba_ports[i] == uap) 2794 amba_ports[i] = NULL; 2795 else if (amba_ports[i]) 2796 busy = true; 2797 } 2798 pl011_dma_remove(uap); 2799 if (!busy) 2800 uart_unregister_driver(&amba_reg); 2801 } 2802 2803 static int pl011_find_free_port(void) 2804 { 2805 int i; 2806 2807 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) 2808 if (!amba_ports[i]) 2809 return i; 2810 2811 return -EBUSY; 2812 } 2813 2814 static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, 2815 struct resource *mmiobase, int index) 2816 { 2817 void __iomem *base; 2818 int ret; 2819 2820 base = devm_ioremap_resource(dev, mmiobase); 2821 if (IS_ERR(base)) 2822 return PTR_ERR(base); 2823 2824 index = pl011_probe_dt_alias(index, dev); 2825 2826 uap->port.dev = dev; 2827 uap->port.mapbase = mmiobase->start; 2828 uap->port.membase = base; 2829 uap->port.fifosize = uap->fifosize; 2830 uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE); 2831 uap->port.flags = UPF_BOOT_AUTOCONF; 2832 uap->port.line = index; 2833 2834 ret = uart_get_rs485_mode(&uap->port); 2835 if (ret) 2836 return ret; 2837 2838 amba_ports[index] = uap; 2839 2840 return 0; 2841 } 2842 2843 static int pl011_register_port(struct uart_amba_port *uap) 2844 { 2845 int ret, i; 2846 2847 /* Ensure interrupts from this UART are masked and cleared */ 2848 pl011_write(0, uap, REG_IMSC); 2849 pl011_write(0xffff, uap, REG_ICR); 2850 2851 if (!amba_reg.state) { 2852 ret = uart_register_driver(&amba_reg); 2853 if (ret < 0) { 2854 dev_err(uap->port.dev, 2855 "Failed to register AMBA-PL011 driver\n"); 2856 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) 2857 if (amba_ports[i] == uap) 2858 amba_ports[i] = NULL; 2859 return ret; 2860 } 2861 } 2862 2863 ret = uart_add_one_port(&amba_reg, &uap->port); 2864 if (ret) 2865 pl011_unregister_port(uap); 2866 2867 return ret; 2868 } 2869 2870 static const struct serial_rs485 pl011_rs485_supported = { 2871 .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | 2872 SER_RS485_RX_DURING_TX, 2873 .delay_rts_before_send = 1, 2874 .delay_rts_after_send = 1, 2875 }; 2876 2877 static int pl011_probe(struct amba_device *dev, const struct amba_id *id) 2878 { 2879 struct uart_amba_port *uap; 2880 struct vendor_data *vendor = id->data; 2881 int portnr, ret; 2882 u32 val; 2883 2884 portnr = pl011_find_free_port(); 2885 if (portnr < 0) 2886 return portnr; 2887 2888 uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port), 2889 GFP_KERNEL); 2890 if (!uap) 2891 return -ENOMEM; 2892 2893 uap->clk = devm_clk_get(&dev->dev, NULL); 2894 if (IS_ERR(uap->clk)) 2895 return PTR_ERR(uap->clk); 2896 2897 uap->reg_offset = vendor->reg_offset; 2898 uap->vendor = vendor; 2899 uap->fifosize = vendor->get_fifosize(dev); 2900 uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; 2901 uap->port.irq = dev->irq[0]; 2902 uap->port.ops = &amba_pl011_pops; 2903 uap->port.rs485_config = pl011_rs485_config; 2904 uap->port.rs485_supported = pl011_rs485_supported; 2905 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev)); 2906 2907 if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) { 2908 switch (val) { 2909 case 1: 2910 uap->port.iotype = UPIO_MEM; 2911 break; 2912 case 4: 2913 uap->port.iotype = UPIO_MEM32; 2914 break; 2915 default: 2916 dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n", 2917 val); 2918 return -EINVAL; 2919 } 2920 } 2921 hrtimer_setup(&uap->trigger_start_tx, pl011_trigger_start_tx, CLOCK_MONOTONIC, 2922 HRTIMER_MODE_REL); 2923 hrtimer_setup(&uap->trigger_stop_tx, pl011_trigger_stop_tx, CLOCK_MONOTONIC, 2924 HRTIMER_MODE_REL); 2925 2926 ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr); 2927 if (ret) 2928 return ret; 2929 2930 amba_set_drvdata(dev, uap); 2931 2932 return pl011_register_port(uap); 2933 } 2934 2935 static void pl011_remove(struct amba_device *dev) 2936 { 2937 struct uart_amba_port *uap = amba_get_drvdata(dev); 2938 2939 uart_remove_one_port(&amba_reg, &uap->port); 2940 pl011_unregister_port(uap); 2941 } 2942 2943 #ifdef CONFIG_PM_SLEEP 2944 static int pl011_suspend(struct device *dev) 2945 { 2946 struct uart_amba_port *uap = dev_get_drvdata(dev); 2947 2948 if (!uap) 2949 return -EINVAL; 2950 2951 return uart_suspend_port(&amba_reg, &uap->port); 2952 } 2953 2954 static int pl011_resume(struct device *dev) 2955 { 2956 struct uart_amba_port *uap = dev_get_drvdata(dev); 2957 2958 if (!uap) 2959 return -EINVAL; 2960 2961 return uart_resume_port(&amba_reg, &uap->port); 2962 } 2963 #endif 2964 2965 static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume); 2966 2967 #ifdef CONFIG_ACPI_SPCR_TABLE 2968 static void qpdf2400_erratum44_workaround(struct device *dev, 2969 struct uart_amba_port *uap) 2970 { 2971 if (!qdf2400_e44_present) 2972 return; 2973 2974 dev_info(dev, "working around QDF2400 SoC erratum 44\n"); 2975 uap->vendor = &vendor_qdt_qdf2400_e44; 2976 } 2977 #else 2978 static void qpdf2400_erratum44_workaround(struct device *dev, 2979 struct uart_amba_port *uap) 2980 { /* empty */ } 2981 #endif 2982 2983 static int sbsa_uart_probe(struct platform_device *pdev) 2984 { 2985 struct uart_amba_port *uap; 2986 struct resource *r; 2987 int portnr, ret; 2988 int baudrate; 2989 2990 /* 2991 * Check the mandatory baud rate parameter in the DT node early 2992 * so that we can easily exit with the error. 2993 */ 2994 if (pdev->dev.of_node) { 2995 struct device_node *np = pdev->dev.of_node; 2996 2997 ret = of_property_read_u32(np, "current-speed", &baudrate); 2998 if (ret) 2999 return ret; 3000 } else { 3001 baudrate = 115200; 3002 } 3003 3004 portnr = pl011_find_free_port(); 3005 if (portnr < 0) 3006 return portnr; 3007 3008 uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port), 3009 GFP_KERNEL); 3010 if (!uap) 3011 return -ENOMEM; 3012 3013 ret = platform_get_irq(pdev, 0); 3014 if (ret < 0) 3015 return ret; 3016 uap->port.irq = ret; 3017 3018 uap->vendor = &vendor_sbsa; 3019 qpdf2400_erratum44_workaround(&pdev->dev, uap); 3020 3021 uap->reg_offset = uap->vendor->reg_offset; 3022 uap->fifosize = 32; 3023 uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; 3024 uap->port.ops = &sbsa_uart_pops; 3025 uap->fixed_baud = baudrate; 3026 3027 snprintf(uap->type, sizeof(uap->type), "SBSA"); 3028 3029 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3030 3031 ret = pl011_setup_port(&pdev->dev, uap, r, portnr); 3032 if (ret) 3033 return ret; 3034 3035 platform_set_drvdata(pdev, uap); 3036 3037 return pl011_register_port(uap); 3038 } 3039 3040 static void sbsa_uart_remove(struct platform_device *pdev) 3041 { 3042 struct uart_amba_port *uap = platform_get_drvdata(pdev); 3043 3044 uart_remove_one_port(&amba_reg, &uap->port); 3045 pl011_unregister_port(uap); 3046 } 3047 3048 static const struct of_device_id sbsa_uart_of_match[] = { 3049 { .compatible = "arm,sbsa-uart", }, 3050 {}, 3051 }; 3052 MODULE_DEVICE_TABLE(of, sbsa_uart_of_match); 3053 3054 static const struct acpi_device_id sbsa_uart_acpi_match[] = { 3055 { "ARMH0011", 0 }, 3056 { "ARMHB000", 0 }, 3057 {}, 3058 }; 3059 MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match); 3060 3061 static struct platform_driver arm_sbsa_uart_platform_driver = { 3062 .probe = sbsa_uart_probe, 3063 .remove = sbsa_uart_remove, 3064 .driver = { 3065 .name = "sbsa-uart", 3066 .pm = &pl011_dev_pm_ops, 3067 .of_match_table = sbsa_uart_of_match, 3068 .acpi_match_table = sbsa_uart_acpi_match, 3069 .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), 3070 }, 3071 }; 3072 3073 static const struct amba_id pl011_ids[] = { 3074 { 3075 .id = 0x00041011, 3076 .mask = 0x000fffff, 3077 .data = &vendor_arm, 3078 }, 3079 { 3080 .id = 0x00380802, 3081 .mask = 0x00ffffff, 3082 .data = &vendor_st, 3083 }, 3084 { 0, 0 }, 3085 }; 3086 3087 MODULE_DEVICE_TABLE(amba, pl011_ids); 3088 3089 static struct amba_driver pl011_driver = { 3090 .drv = { 3091 .name = "uart-pl011", 3092 .pm = &pl011_dev_pm_ops, 3093 .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), 3094 }, 3095 .id_table = pl011_ids, 3096 .probe = pl011_probe, 3097 .remove = pl011_remove, 3098 }; 3099 3100 static int __init pl011_init(void) 3101 { 3102 pr_info("Serial: AMBA PL011 UART driver\n"); 3103 3104 if (platform_driver_register(&arm_sbsa_uart_platform_driver)) 3105 pr_warn("could not register SBSA UART platform driver\n"); 3106 return amba_driver_register(&pl011_driver); 3107 } 3108 3109 static void __exit pl011_exit(void) 3110 { 3111 platform_driver_unregister(&arm_sbsa_uart_platform_driver); 3112 amba_driver_unregister(&pl011_driver); 3113 } 3114 3115 /* 3116 * While this can be a module, if builtin it's most likely the console 3117 * So let's leave module_exit but move module_init to an earlier place 3118 */ 3119 arch_initcall(pl011_init); 3120 module_exit(pl011_exit); 3121 3122 MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd"); 3123 MODULE_DESCRIPTION("ARM AMBA serial port driver"); 3124 MODULE_LICENSE("GPL"); 3125