// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports. They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs. If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/platform_device.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

/* Maximum number of ports this driver registers */
#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

/* Upper bound on interrupt-handler loop passes before bailing out */
#define AMBA_ISR_PASS_LIMIT	256

/* All receive-error bits carried in the data register */
#define UART_DR_ERROR		(UART011_DR_OE | UART011_DR_BE | UART011_DR_PE | UART011_DR_FE)
/* Out-of-band marker OR'd into characters taken from the RX FIFO */
#define UART_DUMMY_DR_RX	BIT(16)

/*
 * Logical register names. Each one indexes a per-vendor table of real
 * register offsets (pl011_std_offsets, pl011_st_offsets below), since
 * vendors disagree on the register layout.
 */
enum {
	REG_DR,
	REG_ST_DMAWM,
	REG_ST_TIMEOUT,
	REG_FR,
	REG_LCRH_RX,
	REG_LCRH_TX,
	REG_IBRD,
	REG_FBRD,
	REG_CR,
	REG_IFLS,
	REG_IMSC,
	REG_RIS,
	REG_MIS,
	REG_ICR,
	REG_DMACR,
	REG_ST_XFCR,
	REG_ST_XON1,
	REG_ST_XON2,
	REG_ST_XOFF1,
	REG_ST_XOFF2,
	REG_ST_ITCR,
	REG_ST_ITIP,
	REG_ST_ABCR,
	REG_ST_ABIMSC,

	/* The size of the array - must be last */
	REG_ARRAY_SIZE,
};

/* Register offsets for the standard ARM PL011 layout */
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;	/* logical -> physical register map */
	unsigned int		ifls;		/* default IFLS (FIFO level select) value */
	unsigned int		fr_busy;	/* FR bit meaning "transmitter busy" */
	unsigned int		fr_dsr;		/* FR bit carrying DSR */
	unsigned int		fr_cts;		/* FR bit carrying CTS */
	unsigned int		fr_ri;		/* FR bit carrying RI */
	unsigned int		inv_fr;		/* FR bits with inverted sense (QDF2400 e44) */
	bool			access_32b;	/* registers need 32-bit accesses */
	bool			oversampling;
	bool			dma_threshold;	/* has ST DMA watermark register */
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;	/* baud/format fixed by firmware */
	bool			skip_ibrd_fbrd;
	bool			set_uartclk_rate;

	/* Returns the FIFO depth in bytes for this device */
	unsigned int (*get_fifosize)(struct amba_device *dev);
};

/* ARM implementations: revisions before 3 have a 16-byte FIFO, later 32 */
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ?
			16 : 32;
}

static struct vendor_data vendor_arm = {
	.reg_offset = pl011_std_offsets,
	.ifls = UART011_IFLS_RX4_8 | UART011_IFLS_TX4_8,
	.fr_busy = UART01x_FR_BUSY,
	.fr_dsr = UART01x_FR_DSR,
	.fr_cts = UART01x_FR_CTS,
	.fr_ri = UART011_FR_RI,
	.oversampling = false,
	.dma_threshold = false,
	.cts_event_workaround = false,
	.always_enabled = false,
	.fixed_options = false,
	.get_fifosize = get_fifosize_arm,
};

/* SBSA generic UART: firmware-configured, driver must not reprogram it */
static const struct vendor_data vendor_sbsa = {
	.reg_offset = pl011_std_offsets,
	.fr_busy = UART01x_FR_BUSY,
	.fr_dsr = UART01x_FR_DSR,
	.fr_cts = UART01x_FR_CTS,
	.fr_ri = UART011_FR_RI,
	.access_32b = true,
	.oversampling = false,
	.dma_threshold = false,
	.cts_event_workaround = false,
	.always_enabled = true,
	.fixed_options = true,
};

#ifdef CONFIG_ACPI_SPCR_TABLE
/*
 * Qualcomm QDF2400 erratum 44: BUSY is unusable, so the (inverted)
 * TXFE bit stands in for it.
 */
static const struct vendor_data vendor_qdt_qdf2400_e44 = {
	.reg_offset = pl011_std_offsets,
	.fr_busy = UART011_FR_TXFE,
	.fr_dsr = UART01x_FR_DSR,
	.fr_cts = UART01x_FR_CTS,
	.fr_ri = UART011_FR_RI,
	.inv_fr = UART011_FR_TXFE,
	.access_32b = true,
	.oversampling = false,
	.dma_threshold = false,
	.cts_event_workaround = false,
	.always_enabled = true,
	.fixed_options = true,
};
#endif

/* Register offsets for the ST Micro derivative (extra ST_* registers) */
static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};

/* ST variants have a fixed 64-byte FIFO */
static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.reg_offset = pl011_st_offsets,
	.ifls = UART011_IFLS_RX_HALF | UART011_IFLS_TX_HALF,
	.fr_busy = UART01x_FR_BUSY,
	.fr_dsr = UART01x_FR_DSR,
	.fr_cts = UART01x_FR_CTS,
	.fr_ri = UART011_FR_RI,
	.oversampling = true,
	.dma_threshold = true,
	.cts_event_workaround = true,
	.always_enabled = false,
	.fixed_options = false,
	.get_fifosize = get_fifosize_st,
};

/* NVIDIA (Tegra) variant: fixed 32-byte FIFO */
static unsigned int get_fifosize_nvidia(struct amba_device *dev)
{
	return 32;
}

static struct vendor_data vendor_nvidia = {
	.reg_offset = pl011_std_offsets,
	.ifls = UART011_IFLS_RX4_8 | UART011_IFLS_TX4_8,
	.fr_busy = UART01x_FR_BUSY,
	.fr_dsr = UART01x_FR_DSR,
	.fr_cts = UART01x_FR_CTS,
	.fr_ri = UART011_FR_RI,
	.oversampling = false,
	.dma_threshold = false,
	.cts_event_workaround = false,
	.always_enabled = false,
	.fixed_options = false,
	.skip_ibrd_fbrd = true,
	.set_uartclk_rate = true,
	.get_fifosize = get_fifosize_nvidia,
};

/* Deals with DMA transactions */

/* One coherent DMA buffer: bus address, length and CPU mapping */
struct pl011_dmabuf {
	dma_addr_t		dma;
	size_t			len;
	char			*buf;
};

struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;	/* which of the two buffers is active */
	struct pl011_dmabuf	dbuf_a;
	struct pl011_dmabuf	dbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int		last_residue;	/* residue seen at last poll */
	unsigned long		last_jiffies;	/* time data last arrived */
	bool			auto_poll_rate;	/* derive poll rate from baud */
	unsigned int		poll_rate;	/* ms */
	unsigned int		poll_timeout;	/* ms before reverting to IRQ mode */
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	dma_addr_t		dma;
	size_t			len;
	char			*buf;
	bool			queued;		/* a TX descriptor is in flight */
};

/* RS-485 TX state machine states (driven elsewhere in this file) */
enum pl011_rs485_tx_state {
	OFF,
	WAIT_AFTER_RTS,
	SEND,
	WAIT_AFTER_SEND,
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
	ktime_t			rs485_tx_drain_interval; /* nano */
	enum pl011_rs485_tx_state	rs485_tx_state;
	struct hrtimer		trigger_start_tx;
	struct hrtimer		trigger_stop_tx;
	bool			console_line_ended;
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	unsigned int		dmacr;		/* dma control reg */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};

static unsigned int pl011_tx_empty(struct uart_port *port);

/* Map a logical register index to this port's hardware offset */
static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
					unsigned int reg)
{
	return uap->reg_offset[reg];
}

/* Read a register, honouring the port's 16/32-bit access width */
static unsigned int pl011_read(const struct uart_amba_port *uap,
			       unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}

/* Write a register, honouring the port's 16/32-bit access width */
static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
			unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer.
Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	int taken = 0;

	/* Drain at most 256 characters so we cannot livelock here */
	while (taken != 256) {
		unsigned int c;
		u16 fr;
		u8 flg;

		fr = pl011_read(uap, REG_FR);
		if (fr & UART01x_FR_RXFE)
			break;
		taken++;

		/* Pull one character; the upper DR bits carry error flags */
		c = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flg = TTY_NORMAL;
		uap->port.icount.rx++;

		if (unlikely(c & UART_DR_ERROR)) {
			if (c & UART011_DR_BE) {
				/* Break: parity/framing bits are meaningless */
				c &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (c & UART011_DR_PE) {
				uap->port.icount.parity++;
			} else if (c & UART011_DR_FE) {
				uap->port.icount.frame++;
			}
			if (c & UART011_DR_OE)
				uap->port.icount.overrun++;

			/* Mask per read_status_mask before picking a flag */
			c &= uap->port.read_status_mask;

			if (c & UART011_DR_BE)
				flg = TTY_BREAK;
			else if (c & UART011_DR_PE)
				flg = TTY_PARITY;
			else if (c & UART011_DR_FE)
				flg = TTY_FRAME;
		}

		if (!uart_prepare_sysrq_char(&uap->port, c & 255))
			uart_insert_char(&uap->port, c, UART011_DR_OE, c, flg);
	}

	return taken;
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

/* Allocate one coherent DMA buffer for the given channel */
static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
			     enum dma_data_direction dir)
{
	db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
				     &db->dma, GFP_KERNEL);
	if (!db->buf)
		return -ENOMEM;
	db->len = PL011_DMA_BUFFER_SIZE;

	return 0;
}

/* Release a buffer obtained from pl011_dmabuf_init() (NULL-safe) */
static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
			      enum dma_data_direction dir)
{
	if (db->buf) {
		dma_free_coherent(chan->device->dev,
				  PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
	}
}

/*
 * Acquire and configure TX (mandatory) and RX (optional) DMA channels,
 * either from the DT/firmware binding or from legacy platform data.
 * On -EPROBE_DEFER dma_probed is left false so we try again later.
 */
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_dbg(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
					   plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan) && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (!IS_ERR(chan)) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing
		 * otherwise assume all is well.
		 */
		if (dma_get_slave_caps(chan, &caps) == 0) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					 "RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * 100 ms defaults to poll rate if not
				 * specified. This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* 3 secs defaults poll_timeout if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate =
				of_property_read_bool(dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (of_property_read_u32(dev->of_node, "poll-rate-ms", &x) == 0)
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (of_property_read_u32(dev->of_node, "poll-timeout-ms", &x) == 0)
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}

/* Release any DMA channels acquired by pl011_dma_probe() */
static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct tty_port *tport = &uap->port.state->port;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	uart_port_lock_irqsave(&uap->port, &flags);
	if (uap->dmatx.queued)
		dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
				 dmatx->len, DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    kfifo_is_empty(&tport->xmit_fifo)) {
		uap->dmatx.queued = false;
		uart_port_unlock_irqrestore(&uap->port, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent. Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	uart_port_unlock_irqrestore(&uap->port, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct tty_port *tport = &uap->port.state->port;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling. This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = kfifo_len(&tport->xmit_fifo);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	count = kfifo_out_peek(&tport->xmit_fifo, dmatx->buf, count);

	/*
	 * Align the TX buffer length to the DMA controller's copy_align
	 * requirements. Some DMA controllers (e.g., Tegra GPC DMA) require
	 * word-aligned transfers. Unaligned bytes will be sent via PIO.
	 */
	if (chan->device->copy_align)
		count = ALIGN_DOWN(count, 1 << chan->device->copy_align);

	dmatx->len = count;
	dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
				    DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev->dev, dmatx->dma)) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
700 */ 701 uart_xmit_advance(&uap->port, count); 702 703 if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) 704 uart_write_wakeup(&uap->port); 705 706 return 1; 707 } 708 709 /* 710 * We received a transmit interrupt without a pending X-char but with 711 * pending characters. 712 * Locking: called with port lock held and IRQs disabled. 713 * Returns: 714 * false if we want to use PIO to transmit 715 * true if we queued a DMA buffer 716 */ 717 static bool pl011_dma_tx_irq(struct uart_amba_port *uap) 718 { 719 if (!uap->using_tx_dma) 720 return false; 721 722 /* 723 * If we already have a TX buffer queued, but received a 724 * TX interrupt, it will be because we've just sent an X-char. 725 * Ensure the TX DMA is enabled and the TX IRQ is disabled. 726 */ 727 if (uap->dmatx.queued) { 728 uap->dmacr |= UART011_TXDMAE; 729 pl011_write(uap->dmacr, uap, REG_DMACR); 730 uap->im &= ~UART011_TXIM; 731 pl011_write(uap->im, uap, REG_IMSC); 732 return true; 733 } 734 735 /* 736 * We don't have a TX buffer queued, so try to queue one. 737 * If we successfully queued a buffer, mask the TX IRQ. 738 */ 739 if (pl011_dma_tx_refill(uap) > 0) { 740 uap->im &= ~UART011_TXIM; 741 pl011_write(uap->im, uap, REG_IMSC); 742 return true; 743 } 744 return false; 745 } 746 747 /* 748 * Stop the DMA transmit (eg, due to received XOFF). 749 * Locking: called with port lock held and IRQs disabled. 750 */ 751 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap) 752 { 753 if (uap->dmatx.queued) { 754 uap->dmacr &= ~UART011_TXDMAE; 755 pl011_write(uap->dmacr, uap, REG_DMACR); 756 } 757 } 758 759 /* 760 * Try to start a DMA transmit, or in the case of an XON/OFF 761 * character queued for send, try to get that character out ASAP. 762 * Locking: called with port lock held and IRQs disabled. 
763 * Returns: 764 * false if we want the TX IRQ to be enabled 765 * true if we have a buffer queued 766 */ 767 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) 768 { 769 u16 dmacr; 770 771 if (!uap->using_tx_dma) 772 return false; 773 774 if (!uap->port.x_char) { 775 /* no X-char, try to push chars out in DMA mode */ 776 bool ret = true; 777 778 if (!uap->dmatx.queued) { 779 if (pl011_dma_tx_refill(uap) > 0) { 780 uap->im &= ~UART011_TXIM; 781 pl011_write(uap->im, uap, REG_IMSC); 782 } else { 783 ret = false; 784 } 785 } else if (!(uap->dmacr & UART011_TXDMAE)) { 786 uap->dmacr |= UART011_TXDMAE; 787 pl011_write(uap->dmacr, uap, REG_DMACR); 788 } 789 return ret; 790 } 791 792 /* 793 * We have an X-char to send. Disable DMA to prevent it loading 794 * the TX fifo, and then see if we can stuff it into the FIFO. 795 */ 796 dmacr = uap->dmacr; 797 uap->dmacr &= ~UART011_TXDMAE; 798 pl011_write(uap->dmacr, uap, REG_DMACR); 799 800 if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) { 801 /* 802 * No space in the FIFO, so enable the transmit interrupt 803 * so we know when there is space. Note that once we've 804 * loaded the character, we should just re-enable DMA. 805 */ 806 return false; 807 } 808 809 pl011_write(uap->port.x_char, uap, REG_DR); 810 uap->port.icount.tx++; 811 uap->port.x_char = 0; 812 813 /* Success - restore the DMA state */ 814 uap->dmacr = dmacr; 815 pl011_write(dmacr, uap, REG_DMACR); 816 817 return true; 818 } 819 820 /* 821 * Flush the transmit buffer. 822 * Locking: called with port lock held and IRQs disabled. 
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	dmaengine_terminate_async(uap->dmatx.chan);

	if (uap->dmatx.queued) {
		/* Drop the in-flight buffer and switch TX DMA off */
		dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
				 uap->dmatx.len, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);

/*
 * Queue an RX DMA transfer into the currently selected (a/b) buffer
 * and hand the receive path over to DMA (RX interrupt masked).
 * Returns 0 on success, -EIO with no channel, -EBUSY if the engine
 * could not prepare a descriptor (caller falls back to IRQ mode).
 */
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_dmabuf *dbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	dbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	/* DMA owns RX now - mask the RX interrupt */
	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred.
This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmabuf *dbuf = use_buf_b ?
		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = dbuf->len - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick the remain data from the DMA */
	if (pending) {
		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_buf() tries to take as many chars
		 * as it can.
		 */
		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = dbuf->len;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
}

/*
 * FIFO-timeout path: pause DMA to get a trustworthy residue, drain the
 * partially-filled buffer plus the FIFO, then swap buffers and restart.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
		&dmarx->dbuf_b : &dmarx->dbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = dbuf->len - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

/* DMA completion callback: buffer filled before any FIFO timeout */
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
		&dmarx->dbuf_b : &dmarx->dbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	uart_port_lock_irq(&uap->port);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = dbuf->len - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	uart_unlock_and_check_sysrq(&uap->port);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	if (!uap->using_rx_dma)
		return;

	/* FIXME. Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}

/*
 * Timer handler for Rx DMA polling.
 * Every poll it checks the residue in the DMA buffer and transfers
 * data to the tty. Also, last_residue is updated for the next polling.
 */
static void pl011_dma_rx_poll(struct timer_list *t)
{
	struct uart_amba_port *uap = timer_container_of(uap, t, dmarx.timer);
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_dmabuf *dbuf;
	int dma_count;
	struct dma_tx_state state;

	dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		/* Residue dropped since last poll: new data has landed */
		dmataken = dbuf->len - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
						   size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode.
We will retrigger DMA at the first interrupt. 1109 */ 1110 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies) 1111 > uap->dmarx.poll_timeout) { 1112 uart_port_lock_irqsave(&uap->port, &flags); 1113 pl011_dma_rx_stop(uap); 1114 uap->im |= UART011_RXIM; 1115 pl011_write(uap->im, uap, REG_IMSC); 1116 uart_port_unlock_irqrestore(&uap->port, flags); 1117 1118 uap->dmarx.running = false; 1119 dmaengine_terminate_all(rxchan); 1120 timer_delete(&uap->dmarx.timer); 1121 } else { 1122 mod_timer(&uap->dmarx.timer, 1123 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); 1124 } 1125 } 1126 1127 static void pl011_dma_startup(struct uart_amba_port *uap) 1128 { 1129 int ret; 1130 1131 if (!uap->dma_probed) 1132 pl011_dma_probe(uap); 1133 1134 if (!uap->dmatx.chan) 1135 return; 1136 1137 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA); 1138 if (!uap->dmatx.buf) { 1139 uap->port.fifosize = uap->fifosize; 1140 return; 1141 } 1142 1143 uap->dmatx.len = PL011_DMA_BUFFER_SIZE; 1144 1145 /* The DMA buffer is now the FIFO the TTY subsystem can use */ 1146 uap->port.fifosize = PL011_DMA_BUFFER_SIZE; 1147 uap->using_tx_dma = true; 1148 1149 if (!uap->dmarx.chan) 1150 goto skip_rx; 1151 1152 /* Allocate and map DMA RX buffers */ 1153 ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a, 1154 DMA_FROM_DEVICE); 1155 if (ret) { 1156 dev_err(uap->port.dev, "failed to init DMA %s: %d\n", 1157 "RX buffer A", ret); 1158 goto skip_rx; 1159 } 1160 1161 ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b, 1162 DMA_FROM_DEVICE); 1163 if (ret) { 1164 dev_err(uap->port.dev, "failed to init DMA %s: %d\n", 1165 "RX buffer B", ret); 1166 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, 1167 DMA_FROM_DEVICE); 1168 goto skip_rx; 1169 } 1170 1171 uap->using_rx_dma = true; 1172 1173 skip_rx: 1174 /* Turn on DMA error (RX/TX will be enabled on demand) */ 1175 uap->dmacr |= UART011_DMAONERR; 1176 pl011_write(uap->dmacr, uap, REG_DMACR); 1177 1178 /* 1179 * ST Micro 
variants has some specific dma burst threshold 1180 * compensation. Set this to 16 bytes, so burst will only 1181 * be issued above/below 16 bytes. 1182 */ 1183 if (uap->vendor->dma_threshold) 1184 pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16, 1185 uap, REG_ST_DMAWM); 1186 1187 if (uap->using_rx_dma) { 1188 if (pl011_dma_rx_trigger_dma(uap)) 1189 dev_dbg(uap->port.dev, 1190 "could not trigger initial RX DMA job, fall back to interrupt mode\n"); 1191 if (uap->dmarx.poll_rate) { 1192 timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0); 1193 mod_timer(&uap->dmarx.timer, 1194 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); 1195 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; 1196 uap->dmarx.last_jiffies = jiffies; 1197 } 1198 } 1199 } 1200 1201 static void pl011_dma_shutdown(struct uart_amba_port *uap) 1202 { 1203 if (!(uap->using_tx_dma || uap->using_rx_dma)) 1204 return; 1205 1206 /* Disable RX and TX DMA */ 1207 while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy) 1208 cpu_relax(); 1209 1210 uart_port_lock_irq(&uap->port); 1211 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE); 1212 pl011_write(uap->dmacr, uap, REG_DMACR); 1213 uart_port_unlock_irq(&uap->port); 1214 1215 if (uap->using_tx_dma) { 1216 /* In theory, this should already be done by pl011_dma_flush_buffer */ 1217 dmaengine_terminate_all(uap->dmatx.chan); 1218 if (uap->dmatx.queued) { 1219 dma_unmap_single(uap->dmatx.chan->device->dev, 1220 uap->dmatx.dma, uap->dmatx.len, 1221 DMA_TO_DEVICE); 1222 uap->dmatx.queued = false; 1223 } 1224 1225 kfree(uap->dmatx.buf); 1226 uap->using_tx_dma = false; 1227 } 1228 1229 if (uap->using_rx_dma) { 1230 dmaengine_terminate_all(uap->dmarx.chan); 1231 /* Clean up the RX DMA */ 1232 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE); 1233 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE); 1234 if (uap->dmarx.poll_rate) 1235 timer_delete_sync(&uap->dmarx.timer); 1236 uap->using_rx_dma = 
			false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer NULL
#endif

/*
 * End an RS485 transmission: once the TX FIFO has drained (and any
 * delay_rts_after_send has elapsed), restore RTS, disable the
 * transmitter and re-enable the receiver.  Deferred work is scheduled
 * on trigger_stop_tx when the FIFO is not yet empty.
 */
static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
{
	struct uart_port *port = &uap->port;
	u32 cr;

	if (uap->rs485_tx_state == SEND)
		uap->rs485_tx_state = WAIT_AFTER_SEND;

	if (uap->rs485_tx_state == WAIT_AFTER_SEND) {
		/* Schedule hrtimer if tx queue not empty */
		if (!pl011_tx_empty(port)) {
			hrtimer_start(&uap->trigger_stop_tx,
				      uap->rs485_tx_drain_interval,
				      HRTIMER_MODE_REL);
			return;
		}
		if (port->rs485.delay_rts_after_send > 0) {
			hrtimer_start(&uap->trigger_stop_tx,
				      ms_to_ktime(port->rs485.delay_rts_after_send),
				      HRTIMER_MODE_REL);
			return;
		}
		/* Continue without any delay */
	} else if (uap->rs485_tx_state == WAIT_AFTER_RTS) {
		hrtimer_try_to_cancel(&uap->trigger_start_tx);
	}

	cr = pl011_read(uap, REG_CR);

	if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
		cr &= ~UART011_CR_RTS;
	else
		cr |= UART011_CR_RTS;

	/* Disable the transmitter and reenable the transceiver */
	cr &= ~UART011_CR_TXE;
	cr |= UART011_CR_RXE;
	pl011_write(cr, uap, REG_CR);

	uap->rs485_tx_state = OFF;
}

/*
 * serial_core stop_tx hook: mask the TX interrupt, stop TX DMA and,
 * when RS485 is active, run the RS485 stop sequence.
 */
static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	if (port->rs485.flags & SER_RS485_ENABLED &&
	    uap->rs485_tx_state == WAIT_AFTER_RTS) {
		pl011_rs485_tx_stop(uap);
		return;
	}

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);

	if (port->rs485.flags & SER_RS485_ENABLED &&
	    uap->rs485_tx_state != OFF)
		pl011_rs485_tx_stop(uap);
}

static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	if (pl011_tx_chars(uap, false)) {
		uap->im |= UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

/*
 * Begin an RS485 transmission: assert RTS per RTS_ON_SEND, enable the
 * transmitter (optionally disabling the receiver for half-duplex) and
 * honour delay_rts_before_send via the trigger_start_tx hrtimer.
 */
static void pl011_rs485_tx_start(struct uart_amba_port *uap)
{
	struct uart_port *port = &uap->port;
	u32 cr;

	if (uap->rs485_tx_state == WAIT_AFTER_RTS) {
		uap->rs485_tx_state = SEND;
		return;
	}
	if (uap->rs485_tx_state == WAIT_AFTER_SEND) {
		hrtimer_try_to_cancel(&uap->trigger_stop_tx);
		uap->rs485_tx_state = SEND;
		return;
	}
	/* uap->rs485_tx_state == OFF */
	/* Enable transmitter */
	cr = pl011_read(uap, REG_CR);
	cr |= UART011_CR_TXE;
	/* Disable receiver if half-duplex */
	if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
		cr &= ~UART011_CR_RXE;

	if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
		cr &= ~UART011_CR_RTS;
	else
		cr |= UART011_CR_RTS;

	pl011_write(cr, uap, REG_CR);

	if (port->rs485.delay_rts_before_send > 0) {
		uap->rs485_tx_state = WAIT_AFTER_RTS;
		hrtimer_start(&uap->trigger_start_tx,
			      ms_to_ktime(port->rs485.delay_rts_before_send),
			      HRTIMER_MODE_REL);
	} else {
		uap->rs485_tx_state = SEND;
	}
}

/*
 * serial_core start_tx hook: run the RS485 start sequence if needed,
 * then kick TX via DMA or fall back to PIO.
 */
static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
	    uap->rs485_tx_state != SEND) {
		pl011_rs485_tx_start(uap);
		/* TX will be started again once the RTS delay expires */
		if (uap->rs485_tx_state == WAIT_AFTER_RTS)
			return;
	}

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}

/* hrtimer callback: delay_rts_before_send elapsed - actually start TX */
static enum hrtimer_restart pl011_trigger_start_tx(struct hrtimer *t)
{
	struct uart_amba_port *uap =
		container_of(t, struct uart_amba_port, trigger_start_tx);
	unsigned long flags;

	uart_port_lock_irqsave(&uap->port, &flags);
	if (uap->rs485_tx_state == WAIT_AFTER_RTS)
		pl011_start_tx(&uap->port);
	uart_port_unlock_irqrestore(&uap->port, flags);

	return HRTIMER_NORESTART;
}

/* hrtimer callback: drain/RTS-after-send delay elapsed - finish TX stop */
static enum hrtimer_restart pl011_trigger_stop_tx(struct hrtimer *t)
{
	struct uart_amba_port *uap =
		container_of(t, struct uart_amba_port, trigger_stop_tx);
	unsigned long flags;

	uart_port_lock_irqsave(&uap->port, &flags);
	if (uap->rs485_tx_state == WAIT_AFTER_SEND)
		pl011_rs485_tx_stop(uap);
	uart_port_unlock_irqrestore(&uap->port, flags);

	return HRTIMER_NORESTART;
}

/* serial_core stop_rx hook: mask RX/error interrupts and stop RX DMA */
static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM | UART011_RTIM | UART011_FEIM |
		     UART011_PEIM | UART011_BEIM | UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}

/* serial_core throttle hook: stop RX under the port lock */
static void pl011_throttle_rx(struct uart_port *port)
{
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	pl011_stop_rx(port);
	uart_port_unlock_irqrestore(port, flags);
}

/* Enable modem-status change interrupts (RI/CTS/DCD/DSR) */
static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM | UART011_CTSMIM | UART011_DCDMIM | UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}

/*
 * Drain the RX FIFO to the TTY layer.  Drops and re-takes the port
 * lock around tty_flip_buffer_push(), and opportunistically tries to
 * switch back to RX DMA mode.
 */
static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	uart_port_unlock(&uap->port);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev,
				"could not trigger RX DMA job fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	uart_port_lock(&uap->port);
}

/*
 * Push one character into the data register.  Outside IRQ context the
 * TX FIFO full flag is checked first; in IRQ context the caller limits
 * the count instead.
 */
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}

/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct tty_port *tport = &uap->port.state->port;
	/* From IRQ context at most half a FIFO is refilled per pass */
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	while (1) {
		unsigned char c;

		if (likely(from_irq) && count-- == 0)
			break;

		if (!kfifo_peek(&tport->xmit_fifo, &c))
			break;

		if (!pl011_tx_char(uap, c, from_irq))
			break;

		kfifo_skip(&tport->xmit_fifo);
	}

	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (kfifo_is_empty(&tport->xmit_fifo)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}

/*
 * Process a modem-status change: diff the FR modem bits against the
 * cached old_status and notify the serial core / waiters accordingly.
 */
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

/* Vendor errata workaround applied before clearing interrupt bits */
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce 26ns(1 uart clk) delay before W1C;
	 * single apb access will incur 2 pclk(133.12Mhz) delay,
	 * so add 2 dummy reads
	 */
	pl011_read(uap, REG_ICR);
	pl011_read(uap, REG_ICR);
}

/*
 * Main interrupt handler: loop on RIS (bounded by AMBA_ISR_PASS_LIMIT)
 * dispatching RX, modem-status and TX work.
 */
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;

	uart_port_lock(&uap->port);
	status = pl011_read(uap, REG_RIS) & uap->im;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			/* TX/RX/RT are cleared by their service routines */
			pl011_write(status & ~(UART011_TXIS | UART011_RTIS | UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS | UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS | UART011_DCDMIS |
				      UART011_CTSMIS | UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & uap->im;
		} while (status != 0);
		handled = 1;
	}

	uart_unlock_and_check_sysrq(&uap->port);

	return IRQ_RETVAL(handled);
}

/* serial_core tx_empty hook: TEMT when neither busy nor TX-FIFO-full */
static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}

/* Set mask in *ptr only when cond is true (helper for get_mctrl) */
static void pl011_maybe_set_bit(bool cond, unsigned int *ptr, unsigned int mask)
{
	if (cond)
		*ptr |= mask;
}

/* serial_core get_mctrl hook: map FR status bits to TIOCM_* flags */
static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = pl011_read(uap, REG_FR);

	pl011_maybe_set_bit(status & UART01x_FR_DCD, &result, TIOCM_CAR);
	pl011_maybe_set_bit(status & uap->vendor->fr_dsr, &result, TIOCM_DSR);
	pl011_maybe_set_bit(status & uap->vendor->fr_cts, &result, TIOCM_CTS);
	pl011_maybe_set_bit(status & uap->vendor->fr_ri, &result, TIOCM_RNG);

	return result;
}

/* Set or clear mask in *ptr depending on cond (helper for set_mctrl) */
static void pl011_assign_bit(bool cond, unsigned int *ptr, unsigned int mask)
{
	if (cond)
		*ptr |= mask;
	else
		*ptr &= ~mask;
}

/* serial_core set_mctrl hook: map TIOCM_* flags to CR bits */
static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

	pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTS);
	pl011_assign_bit(mctrl & TIOCM_DTR, &cr, UART011_CR_DTR);
	pl011_assign_bit(mctrl & TIOCM_OUT1, &cr, UART011_CR_OUT1);
	pl011_assign_bit(mctrl & TIOCM_OUT2, &cr, UART011_CR_OUT2);
	pl011_assign_bit(mctrl & TIOCM_LOOP, &cr, UART011_CR_LBE);

	if (port->status & UPSTAT_AUTORTS) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTSEN);
	}

	pl011_write(cr, uap, REG_CR);
}

/* serial_core break_ctl hook: drive the BRK bit in LCRH_TX */
static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	uart_port_lock_irqsave(&uap->port, &flags);
	lcr_h = pl011_read(uap, REG_LCRH_TX);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	pl011_write(lcr_h, uap, REG_LCRH_TX);
	uart_port_unlock_irqrestore(&uap->port, flags);
}

#ifdef CONFIG_CONSOLE_POLL

/* Quiesce interrupts for polled (e.g. KDB) operation */
static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}

/* Non-blocking polled read: NO_POLL_CHAR when the RX FIFO is empty */
static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = pl011_read(uap, REG_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return pl011_read(uap, REG_DR);
}

/* Polled write: spin until the TX FIFO has room, then emit ch */
static void pl011_put_poll_char(struct uart_port *port, unsigned char ch)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

/*
 * Basic hardware bring-up: pinctrl, clock enable, clear pending
 * error/RX interrupts and run any platform init hook.
 */
static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);

	/*
	 * Save interrupts enable mask, and enable RX interrupts in case if
	 * the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}

/* True when this variant has separate RX and TX LCRH registers */
static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
}

/* Write lcr_h to LCRH_RX (and, after a delay, to LCRH_TX if split) */
static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}

/* Program the saved interrupt mask and claim the (shared) IRQ line */
static int pl011_allocate_irq(struct uart_amba_port *uap)
{
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
}

/*
 * Enable interrupts, only timeouts when using DMA
 * if initial RX DMA job failed, start in interrupt mode
 * as well.
 */
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
	unsigned long flags;
	unsigned int i;

	uart_port_lock_irqsave(&uap->port, &flags);

	/* Clear out any spuriously appearing RX interrupts */
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);

	/*
	 * RXIS is asserted only when the RX FIFO transitions from below
	 * to above the trigger threshold. If the RX FIFO is already
	 * full to the threshold this can't happen and RXIS will now be
	 * stuck off.
 Drain the RX FIFO explicitly to fix this:
	 */
	for (i = 0; i < uap->fifosize * 2; ++i) {
		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
			break;

		pl011_read(uap, REG_DR);
	}

	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	uart_port_unlock_irqrestore(&uap->port, flags);
}

/* serial_core unthrottle hook: re-enable RX interrupts (and RX DMA) */
static void pl011_unthrottle_rx(struct uart_port *port)
{
	struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
	unsigned long flags;

	uart_port_lock_irqsave(&uap->port, &flags);

	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;

	pl011_write(uap->im, uap, REG_IMSC);

#ifdef CONFIG_DMA_ENGINE
	if (uap->using_rx_dma) {
		uap->dmacr |= UART011_RXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
#endif

	uart_port_unlock_irqrestore(&uap->port, flags);
}

/*
 * serial_core startup hook: hardware init, IRQ allocation, FIFO
 * thresholds, UART enable (TX deferred under RS485), DMA startup and
 * interrupt enable.  On failure the clock taken by pl011_hwinit() is
 * released again.
 */
static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	retval = pl011_allocate_irq(uap);
	if (retval)
		goto clk_dis;

	pl011_write(uap->vendor->ifls, uap, REG_IFLS);

	uart_port_lock_irq(&uap->port);

	/* Preserve RTS/DTR, enable the UART and receiver */
	cr = pl011_read(uap, REG_CR);
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE;

	/* Under RS485 the transmitter is enabled by the TX-start path */
	if (!(port->rs485.flags & SER_RS485_ENABLED))
		cr |= UART011_CR_TXE;

	pl011_write(cr, uap, REG_CR);

	uart_port_unlock_irq(&uap->port);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	pl011_enable_interrupts(uap);

	return 0;

clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}

/* startup hook for the reduced SBSA (ACPI) UART */
static int sbsa_uart_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		return retval;

	retval = pl011_allocate_irq(uap);
	if (retval)
		return retval;

	/* The SBSA UART does not support any modem status lines. */
	uap->old_status = 0;

	pl011_enable_interrupts(uap);

	return 0;
}

/* Clear break and FIFO-enable bits in the given LCRH register */
static void pl011_shutdown_channel(struct uart_amba_port *uap, unsigned int lcrh)
{
	unsigned long val;

	val = pl011_read(uap, lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	pl011_write(val, uap, lcrh);
}

/*
 * disable the port. It should not disable RTS and DTR.
 * Also RTS and DTR state should be preserved to restore
 * it during startup().
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	uart_port_lock_irq(&uap->port);
	cr = pl011_read(uap, REG_CR);
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	uart_port_unlock_irq(&uap->port);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
}

/* Mask all interrupts and clear everything pending, under the lock */
static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
	uart_port_lock_irq(&uap->port);

	/* mask all interrupts and clear all pending ones */
	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	uart_port_unlock_irq(&uap->port);
}

/*
 * serial_core shutdown hook: quiesce interrupts, DMA and RS485 state,
 * release the IRQ, disable the UART, drop the clock and run any
 * platform exit hook.
 */
static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	pl011_dma_shutdown(uap);

	if ((port->rs485.flags & SER_RS485_ENABLED && uap->rs485_tx_state != OFF))
		pl011_rs485_tx_stop(uap);

	free_irq(uap->port.irq, uap);

	pl011_disable_uart(uap);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

/* shutdown hook for the reduced SBSA (ACPI) UART */
static void sbsa_uart_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	free_irq(uap->port.irq, uap);

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

/* Derive read/ignore status masks from the termios input flags */
static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
}

/*
 * serial_core set_termios hook: compute the baud divisor, program
 * frame format, flow control and the baud registers, observing the
 * required IBRD/FBRD-before-LCRH write ordering.
 */
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  const struct ktermios *old)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;
	unsigned int max_baud;
	unsigned int bits;

	/* Oversampling variants divide the clock by 8 instead of 16 */
	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	max_baud = port->uartclk / clkdiv;

	if (uap->vendor->set_uartclk_rate) {
		long max_clkrate = clk_round_rate(uap->clk, UINT_MAX);

		/*
		 * Clock is reprogrammable - determine max baud from the clock's
		 * maximum rate, not the current uartclk.
		 */
		if (max_clkrate > 0)
			max_baud = max_clkrate / clkdiv;
	}

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);

	if (uap->vendor->set_uartclk_rate) {
		int err;

		err = clk_set_rate(uap->clk, baud * clkdiv);
		if (err) {
			dev_err(port->dev, "Failed to set clock rate: %d\n", err);
			return;
		}
	}

#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: // CS8
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
		if (termios->c_cflag & CMSPAR)
			lcr_h |= UART011_LCRH_SPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	bits = tty_get_frame_size(termios->c_cflag);

	uart_port_lock_irqsave(port, &flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * Calculate the approximated time it takes to transmit one character
	 * with the given baud rate. We use this as the poll interval when we
	 * wait for the tx queue to empty.
	 */
	uap->rs485_tx_drain_interval = ns_to_ktime(DIV_ROUND_UP(bits * NSEC_PER_SEC, baud));

	pl011_setup_status_masks(port, termios);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* RS485 and hardware flow control are mutually exclusive */
	if (port->rs485.flags & SER_RS485_ENABLED)
		termios->c_cflag &= ~CRTSCTS;

	old_cr = pl011_read(uap, REG_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	if (!uap->vendor->skip_ibrd_fbrd) {
		/* quot is the divisor in units of 1/64 */
		if (baud > port->uartclk / 16)
			quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
		else
			quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

		/*
		 * Workaround for the ST Micro oversampling variants to
		 * increase the bitrate slightly, by lowering the divisor,
		 * to avoid delayed sampling of start bit at high speeds,
		 * else we see data corruption.
		 */
		if (uap->vendor->oversampling) {
			if (baud >= 3000000 && baud < 3250000 && quot > 1)
				quot -= 1;
			else if (baud > 3250000 && quot > 2)
				quot -= 2;
		}
		/* Set baud rate */
		pl011_write(quot & 0x3f, uap, REG_FBRD);
		pl011_write(quot >> 6, uap, REG_IBRD);
	}

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);

	/*
	 * Receive was disabled by pl011_disable_uart during shutdown.
	 * Need to reenable receive if you need to use a tty_driver
	 * returns from tty_find_polling_driver() after a port shutdown.
	 */
	old_cr |= UART011_CR_RXE;
	pl011_write(old_cr, uap, REG_CR);

	uart_port_unlock_irqrestore(port, flags);
}

/* set_termios hook for the fixed-rate SBSA UART (forces 8n1) */
static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		      const struct ktermios *old)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned long flags;

	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

	/* The SBSA UART only supports 8n1 without hardware flow control. */
	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
	termios->c_cflag |= CS8 | CLOCAL;

	uart_port_lock_irqsave(port, &flags);
	uart_update_timeout(port, CS8, uap->fixed_baud);
	pl011_setup_status_masks(port, termios);
	uart_port_unlock_irqrestore(port, flags);
}

/* serial_core type hook: human-readable port type string */
static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_AMBA;
}

/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs())
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	if (port->mapbase != (unsigned long)ser->iomem_base)
		ret = -EINVAL;
	return ret;
}

/* serial_core rs485_config hook: stop any TX in flight, disable auto-RTS */
static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios,
			      struct serial_rs485 *rs485)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	if (port->rs485.flags & SER_RS485_ENABLED)
		pl011_rs485_tx_stop(uap);

	/* Make sure auto RTS is disabled */
	if (rs485->flags & SER_RS485_ENABLED) {
		u32 cr = pl011_read(uap, REG_CR);

		cr &= ~UART011_CR_RTSEN;
		pl011_write(cr, uap, REG_CR);
		port->status &= ~UPSTAT_AUTORTS;
	}

	return 0;
}

static const struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.throttle	= pl011_throttle_rx,
	.unthrottle	= pl011_unthrottle_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

/* The SBSA UART has no modem control lines: set_mctrl is a no-op */
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct
uart_port *port)
{
	/* No modem-status inputs on the SBSA UART. */
	return 0;
}

static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

/* One slot per possible port, indexed by uart line number. */
static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

/* Busy-wait for TX FIFO space, then emit one character. */
static void pl011_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
	/* Track line state so an interrupted nbcon write can restart cleanly. */
	uap->console_line_ended = (ch == '\n');
}

/*
 * Read back the current hardware line settings so that "console=" with
 * no options can adopt whatever the boot loader already programmed.
 */
static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
				      int *parity, int *bits)
{
	unsigned int lcr_h, ibrd, fbrd;
	unsigned int clkdiv;

	/* Leave the caller's defaults alone if the UART is not enabled. */
	if (!(pl011_read(uap, REG_CR) & UART01x_CR_UARTEN))
		return;

	lcr_h = pl011_read(uap, REG_LCRH_TX);

	*parity = 'n';
	if (lcr_h & UART01x_LCRH_PEN) {
		if (lcr_h & UART01x_LCRH_EPS)
			*parity = 'e';
		else
			*parity = 'o';
	}

	/* Mask 0x60 covers the two WLEN bits in LCR_H. */
	if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
		*bits = 7;
	else
		*bits = 8;

	if (uap->vendor->skip_ibrd_fbrd) {
		/* Fixed /16 divider, expressed in 1/64ths like the else path. */
		clkdiv = 64;
	} else {
		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);
		clkdiv = 64 * ibrd + fbrd;
	}

	*baud = uap->port.uartclk * 4 / clkdiv;

	if (uap->vendor->oversampling &&
	    (pl011_read(uap, REG_CR) & ST_UART011_CR_OVSFACT))
		*baud *= 2;
}

/* Console .setup callback: select the port and program line settings. */
static int pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	uap->console_line_ended = true;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

/**
 * pl011_console_match - non-standard console matching
 * @co:	  registering console
 * @name: name from console command line
 * @idx:  index from console command line
 * @options: ptr to option string from console command line
 *
 * Only attempts to match console command lines of the form:
 *	console=pl011,mmio|mmio32,<addr>[,<options>]
 *	console=pl011,0x<addr>[,<options>]
 * This form is used to register an initial earlycon boot console and
 * replace it with the amba_console at pl011 driver init.
 *
 * Performs console setup for a match (as required by interface)
 * If no <options> are specified, then assume the h/w is already setup.
 *
 * Returns 0 if console matches; otherwise non-zero to use default matching
 */
static int pl011_console_match(struct console *co, char *name, int idx,
			       char *options)
{
	enum uart_iotype iotype;
	resource_size_t addr;
	int i;

	/*
	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
	 * have a distinct console name, so make sure we check for that.
	 * The actual implementation of the erratum occurs in the probe
	 * function.
	 */
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
		return -ENODEV;

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port specified on the command line */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		uart_port_set_cons(port, co);
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}

/*
 * nbcon atomic write callback: may be invoked from any context, so the
 * entire emission is performed within a single unsafe section.
 */
static void
pl011_console_write_atomic(struct console *co, struct nbcon_write_context *wctxt)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0;

	if (!nbcon_enter_unsafe(wctxt))
		return;

	clk_enable(uap->clk);

	if (!uap->vendor->always_enabled) {
		/* Temporarily force UART and TX on, with CTS flow control off. */
		old_cr = pl011_read(uap, REG_CR);
		pl011_write((old_cr & ~UART011_CR_CTSEN) | (UART01x_CR_UARTEN | UART011_CR_TXE),
			    uap, REG_CR);
	}

	/* Start on a fresh line if a previous write was interrupted mid-line. */
	if (!uap->console_line_ended)
		uart_console_write(&uap->port, "\n", 1, pl011_console_putchar);
	uart_console_write(&uap->port, wctxt->outbuf, wctxt->len, pl011_console_putchar);

	/* Drain the transmitter before restoring the saved control register. */
	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy)
		cpu_relax();

	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	clk_disable(uap->clk);

	nbcon_exit_unsafe(wctxt);
}

/*
 * nbcon threaded write callback: emits one character per unsafe section
 * so console ownership can be handed over between characters.
 */
static void
pl011_console_write_thread(struct console *co, struct nbcon_write_context *wctxt)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0;

	if (!nbcon_enter_unsafe(wctxt))
		return;

	clk_enable(uap->clk);

	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		pl011_write((old_cr & ~UART011_CR_CTSEN) | (UART01x_CR_UARTEN | UART011_CR_TXE),
			    uap, REG_CR);
	}

	if (nbcon_exit_unsafe(wctxt)) {
		int i;
		unsigned int len = READ_ONCE(wctxt->len);

		for (i = 0; i < len; i++) {
			if (!nbcon_enter_unsafe(wctxt))
				break;
			uart_console_write(&uap->port, wctxt->outbuf + i, 1, pl011_console_putchar);
			if (!nbcon_exit_unsafe(wctxt))
				break;
		}
	}

	/* Reacquire ownership so we can drain and restore hardware state. */
	while (!nbcon_enter_unsafe(wctxt))
		nbcon_reacquire_nobuf(wctxt);

	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy)
		cpu_relax();

	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	clk_disable(uap->clk);

	nbcon_exit_unsafe(wctxt);
}

/* nbcon device_lock callback: serialize against normal port users. */
static void
pl011_console_device_lock(struct console *co, unsigned long *flags)
{
	__uart_port_lock_irqsave(&amba_ports[co->index]->port, flags);
}

static void
pl011_console_device_unlock(struct console *co, unsigned long flags)
{
	__uart_port_unlock_irqrestore(&amba_ports[co->index]->port, flags);
}

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.match		= pl011_console_match,
	.write_atomic	= pl011_console_write_atomic,
	.write_thread	= pl011_console_write_thread,
	.device_lock	= pl011_console_device_lock,
	.device_unlock	= pl011_console_device_unlock,
	.flags		= CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)

/* Erratum-44 putc: must also wait for TXFE after every character. */
static void qdf2400_e44_putc(struct uart_port *port, unsigned char c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}

static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}

/* Early console putc, honouring 32-bit-only register access variants. */
static void pl011_putc(struct uart_port *port, unsigned char c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

#ifdef CONFIG_CONSOLE_POLL
/* Non-blocking read of one character; NO_POLL_CHAR if the RX FIFO is empty. */
static int pl011_getc(struct uart_port *port)
{
	if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	if (port->iotype == UPIO_MEM32)
		return readl(port->membase + UART01x_DR);
	else
		return readb(port->membase + UART01x_DR);
}

/* Drain up to @n pending characters into @s; returns the number read. */
static int pl011_early_read(struct console *con, char *s, unsigned int n)
{
	struct earlycon_device *dev = con->data;
	int ch, num_read = 0;

	while (num_read < n) {
		ch = pl011_getc(&dev->port);
		if (ch == NO_POLL_CHAR)
			break;

		s[num_read++] = ch;
	}

	return num_read;
}
#else
#define pl011_early_read NULL
#endif

/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line. Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	unsigned int cr;

	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	device->con->read = pl011_early_read;

	/* Enable UART, RX and TX while preserving the RTS/DTR outputs. */
	if (device->port.iotype == UPIO_MEM32)
		cr = readl(device->port.membase + UART011_CR);
	else
		cr = readw(device->port.membase + UART011_CR);
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	if (device->port.iotype == UPIO_MEM32)
		writel(cr, device->port.membase + UART011_CR);
	else
		writew(cr, device->port.membase + UART011_CR);

	return 0;
}

OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);

/*
 * The SBSA UART has no defined control register and is assumed to
 * be pre-enabled by firmware, so we do not write to UART011_CR.
 */
static int __init sbsa_uart_early_console_setup(struct earlycon_device *device,
						const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	device->con->read = pl011_early_read;

	return 0;
}

OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", sbsa_uart_early_console_setup);

/*
 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>". Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table. In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;
	return 0;
}

EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);

#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};

/*
 * Map a probe-order index onto a DT "serial" alias id when one exists;
 * warn once when aliased and non-aliased ports are mixed in one tree.
 */
static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias;
	static bool seen_dev_without_alias;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		/* Fall back to probe order if the alias slot is taken. */
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret]) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}

/* unregisters the driver also if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}

/* Return the first unused amba_ports[] index, or -EBUSY when full. */
static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (!amba_ports[i])
			return i;

	return -EBUSY;
}

/* Common uart_port initialisation shared by the AMBA and SBSA probes. */
static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;
	int ret;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* Honour a DT "serial" alias for the line number, if present. */
	index = pl011_probe_dt_alias(index, dev);

	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	ret = uart_get_rs485_mode(&uap->port);
	if (ret)
		return ret;

	amba_ports[index] = uap;

	return 0;
}

/* Add @uap to serial core, registering the uart_driver on first use. */
static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret, i;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
				if (amba_ports[i] == uap)
					amba_ports[i] = NULL;
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}

static const struct serial_rs485 pl011_rs485_supported = {
	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
		 SER_RS485_RX_DURING_TX,
	.delay_rts_before_send = 1,
	.delay_rts_after_send = 1,
};

/* AMBA bus probe: allocate, configure and register one PL011 port. */
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;
	u32 val;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;
	uap->port.rs485_config = pl011_rs485_config;
	uap->port.rs485_supported = pl011_rs485_supported;
	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	/* An explicit "reg-io-width" property overrides the vendor default. */
	if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) {
		switch (val) {
		case 1:
			uap->port.iotype = UPIO_MEM;
			break;
		case 4:
			uap->port.iotype = UPIO_MEM32;
			break;
		default:
			dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n",
				 val);
			return -EINVAL;
		}
	}
	hrtimer_setup(&uap->trigger_start_tx, pl011_trigger_start_tx, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_setup(&uap->trigger_stop_tx, pl011_trigger_stop_tx, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}

static void pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);

#ifdef CONFIG_ACPI_SPCR_TABLE
/* Switch to the erratum-44 vendor data when SPCR flagged the SoC. */
static void qpdf2400_erratum44_workaround(struct device *dev,
					  struct uart_amba_port *uap)
{
	if (!qdf2400_e44_present)
		return;

	dev_info(dev, "working around QDF2400 SoC erratum 44\n");
	uap->vendor = &vendor_qdt_qdf2400_e44;
}
#else
static void qpdf2400_erratum44_workaround(struct device *dev,
					  struct uart_amba_port *uap)
{ /* empty */ }
#endif

/* Platform-bus probe for ACPI/DT SBSA ("arm,sbsa-uart") ports. */
static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can easily exit with the error.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		/* ACPI case: no DT property, assume the SBSA default. */
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	uap->port.irq	= ret;

	uap->vendor = &vendor_sbsa;
	qpdf2400_erratum44_workaround(&pdev->dev, uap);

	uap->reg_offset	= uap->vendor->reg_offset;
	uap->fifosize	= 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops	= &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}

static void sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
}

static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{ "ARMHB000", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);

static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe		= sbsa_uart_probe,
	.remove		= sbsa_uart_remove,
	.driver	= {
		.name	= "sbsa-uart",
		.pm	= &pl011_dev_pm_ops,
		.of_match_table = sbsa_uart_of_match,
		.acpi_match_table = sbsa_uart_acpi_match,
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
};

static const struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{
		.id	= 0x0006b011,
		.mask	= 0x000fffff,
		.data	= &vendor_nvidia,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

/* Register the SBSA platform driver first, then the AMBA driver. */
static int __init pl011_init(void)
{
	pr_info("Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if builtin it's most likely the console
 * So let's leave module_exit but move module_init to an earlier place
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");