/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

#include "amba-pl011.h"

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)

static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	unsigned int		fr_busy;
	unsigned int		fr_dsr;
	unsigned int		fr_cts;
	unsigned int		fr_ri;
	bool			access_32b;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};
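/*
 * Illustrative sketch only (hypothetical, not part of this driver): a new
 * vendor variant is described by filling in another vendor_data instance
 * and pointing .reg_offset at its register layout table, e.g.:
 *
 *	static struct vendor_data vendor_foo = {
 *		.reg_offset	= pl011_std_offsets,
 *		.fr_busy	= UART01x_FR_BUSY,
 *		.get_fifosize	= get_fifosize_arm,
 *	};
 *
 * All register accesses then go through pl011_read()/pl011_write() below,
 * which resolve register enums via this table.
 */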
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};

static struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};

static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};

static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};

static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = ZX_UART011_DR,
	[REG_FR] = ZX_UART011_FR,
	[REG_LCRH_RX] = ZX_UART011_LCRH,
	[REG_LCRH_TX] = ZX_UART011_LCRH,
	[REG_IBRD] = ZX_UART011_IBRD,
	[REG_FBRD] = ZX_UART011_FBRD,
	[REG_CR] = ZX_UART011_CR,
	[REG_IFLS] = ZX_UART011_IFLS,
	[REG_IMSC] = ZX_UART011_IMSC,
	[REG_RIS] = ZX_UART011_RIS,
	[REG_MIS] = ZX_UART011_MIS,
	[REG_ICR] = ZX_UART011_ICR,
	[REG_DMACR] = ZX_UART011_DMACR,
};

static struct vendor_data vendor_zte __maybe_unused = {
	.reg_offset		= pl011_zte_offsets,
	.access_32b		= true,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= ZX_UART01x_FR_BUSY,
	.fr_dsr			= ZX_UART01x_FR_DSR,
	.fr_cts			= ZX_UART01x_FR_CTS,
	.fr_ri			= ZX_UART011_FR_RI,
	.get_fifosize		= get_fifosize_arm,
};

/* Deals with DMA transactions */

struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};
struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int		last_residue;
	unsigned long		last_jiffies;
	bool			auto_poll_rate;
	unsigned int		poll_rate;
	unsigned int		poll_timeout;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	bool			autorts;
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};

static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
	unsigned int reg)
{
	return uap->reg_offset[reg];
}

static unsigned int pl011_read(const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}

static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}
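/*
 * Illustrative sketch of the idiom used throughout this driver (assumed
 * from the code below, not additional driver code): interrupt masking goes
 * through the cached copy in uap->im rather than a hardware read-modify-
 * write, e.g.:
 *
 *	uap->im &= ~UART011_TXIM;		// update the shadow mask
 *	pl011_write(uap->im, uap, REG_IMSC);	// propagate to the UART
 *
 * Keeping a shadow copy saves a bus read per update and avoids racing on
 * read-back of the hardware register.
 */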
/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status;
	unsigned int ch, flag, max_count = 256;
	int fifotaken = 0;

	while (max_count--) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface;
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}
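/*
 * Illustrative usage sketch (assumed; it mirrors pl011_dma_startup()
 * further down): each RX buffer is set up once at startup and torn down
 * at shutdown:
 *
 *	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
 *			       DMA_FROM_DEVICE);
 *	...
 *	pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
 *			 DMA_FROM_DEVICE);
 *
 * The buffer comes from dma_alloc_coherent(), so no further mapping or
 * cache maintenance is needed; the scatterlist merely packages the
 * address and length for dmaengine_prep_slave_sg().
 */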
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
						plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter,
					   plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their
		 * capabilities. If the controller does, check for suitable
		 * residue processing, otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					"RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * Default to a 100 ms poll rate if not
				 * specified. This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate =  100;
			}
			/* poll_timeout defaults to 3 seconds if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
						dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
						"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
						"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent. Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}
/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}
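/*
 * Worked example of the wrap-around copy above (illustrative numbers):
 * with UART_XMIT_SIZE = 4096, xmit->tail = 4000 and count = 200, the
 * pending data straddles the end of the ring, so it is copied in two
 * pieces:
 *
 *	first  = 4096 - 4000 = 96;	// tail up to the end of the buffer
 *	second = 200 - 96    = 104;	// from the start of the buffer
 *
 * After the DMA is queued, tail advances modulo the buffer size:
 * (4000 + 200) & 4095 = 104.
 */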
/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}
/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}
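/*
 * Illustrative note (inferred from the surrounding code): RX DMA uses two
 * buffers in a ping-pong scheme.  use_buf_b selects which sgbuf the
 * in-flight transfer targets; on completion or timeout the driver drains
 * that buffer into the TTY layer while immediately re-arming DMA on the
 * other one, roughly:
 *
 *	dmarx->use_buf_b = !dmarx->use_buf_b;
 *	pl011_dma_rx_trigger_dma(uap);
 *
 * so reception can continue while the previous buffer is being emptied.
 */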
/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick up the remaining data from the DMA */
	if (pending) {

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}

static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}
/*
 * Timer handler for Rx DMA polling.
 * On every poll it checks the residue in the DMA buffer and transfers
 * data to the TTY. last_residue is then updated for the next poll.
 */
static void pl011_dma_rx_poll(unsigned long args)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)args;
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
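/*
 * Illustrative timing sketch (using the default values set in
 * pl011_dma_probe() above): with poll_rate = 100 ms and poll_timeout =
 * 3000 ms, this handler re-reads the DMA residue every 100 ms; if 30
 * consecutive polls see no new data (3 s idle), the poll timer is stopped
 * and the port falls back to interrupt mode until the next RX interrupt
 * re-triggers DMA.
 */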
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have a specific DMA burst threshold
	 * compensation. Set this to 16 bytes, so bursts will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			init_timer(&(uap->dmarx.timer));
			uap->dmarx.timer.function = pl011_dma_rx_poll;
			uap->dmarx.timer.data = (unsigned long)uap;
			mod_timer(&uap->dmarx.timer,
				jiffies +
				msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}

static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev,
				     &uap->dmatx.sg, 1, DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
				 DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif
static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);
}

static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	uap->im |= UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_tx_chars(uap, false);
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue	= PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					jiffies +
					msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}

static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}
static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit))
		pl011_stop_tx(&uap->port);
}

static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	unsigned int dummy_read;

	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce a 26 ns (1 UART clk) delay before W1C;
	 * a single APB access will incur 2 pclk (133.12 MHz) delays,
	 * so add 2 dummy reads.
	 */
	dummy_read = pl011_read(uap, REG_ICR);
	dummy_read = pl011_read(uap, REG_ICR);
}

static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	u16 imsc;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);
	imsc = pl011_read(uap, REG_IMSC);
	status = pl011_read(uap, REG_RIS) & imsc;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & imsc;
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
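/*
 * Descriptive note (not functional code): the pass_counter in pl011_int()
 * above bounds the time spent in the interrupt handler.  Under continuous
 * traffic the masked status register may never read zero, so after
 * AMBA_ISR_PASS_LIMIT (256) iterations the handler bails out and lets the
 * interrupt be re-raised, keeping IRQ latency for the rest of the system
 * bounded.
 */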
static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status = pl011_read(uap, REG_FR);

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}

static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = pl011_read(uap, REG_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
	TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
	TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

#define	TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (uap->autorts) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	pl011_write(cr, uap, REG_CR);
}
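/*
 * Illustrative expansion of the helper macros above: in pl011_get_mctrl(),
 * TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR) expands to
 *
 *	if (status & UART01x_FR_DCD)
 *		result |= TIOCM_CAR;
 *
 * and in pl011_set_mctrl() the reverse mapping sets or clears the
 * corresponding control-register bit for each TIOCM flag.
 */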
static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = pl011_read(uap, REG_LCRH_TX);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	pl011_write(lcr_h, uap, REG_LCRH_TX);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = pl011_read(uap, REG_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return pl011_read(uap, REG_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
				unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);

	/*
	 * Save the interrupt enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}

static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
}

static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write to a read-only register
		 * 10 times.
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}
static int pl011_allocate_irq(struct uart_amba_port *uap)
{
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
}

/*
 * Enable interrupts, but only the RX timeout when using DMA; if the
 * initial RX DMA job failed, start in interrupt mode as well.
 */
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
	spin_lock_irq(&uap->port.lock);

	/* Clear out any spuriously appearing RX interrupts */
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	spin_unlock_irq(&uap->port.lock);
}

static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	retval = pl011_allocate_irq(uap);
	if (retval)
		goto clk_dis;

	pl011_write(uap->vendor->ifls, uap, REG_IFLS);

	spin_lock_irq(&uap->port.lock);

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);

	spin_unlock_irq(&uap->port.lock);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	pl011_enable_interrupts(uap);

	return 0;

 clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}

static int sbsa_uart_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		return retval;

	retval = pl011_allocate_irq(uap);
	if (retval)
		return retval;

	/* The SBSA UART does not support any modem status lines. */
	uap->old_status = 0;

	pl011_enable_interrupts(uap);

	return 0;
}

static void pl011_shutdown_channel(struct uart_amba_port *uap,
				   unsigned int lcrh)
{
	unsigned long val;

	val = pl011_read(uap, lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	pl011_write(val, uap, lcrh);
}

/*
 * Disable the port. It should not disable RTS and DTR.
 * The RTS and DTR state should also be preserved, so that it can be
 * restored during startup().
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	uap->autorts = false;
	spin_lock_irq(&uap->port.lock);
	cr = pl011_read(uap, REG_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
}

static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
	spin_lock_irq(&uap->port.lock);

	/* mask all interrupts and clear all pending ones */
	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	spin_unlock_irq(&uap->port.lock);
}

static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	pl011_dma_shutdown(uap);

	free_irq(uap->port.irq, uap);

	pl011_disable_uart(uap);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

static void sbsa_uart_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	free_irq(uap->port.irq, uap);

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
}

static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: // CS8
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
		if (termios->c_cflag & CMSPAR)
			lcr_h |= UART011_LCRH_SPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	pl011_setup_status_masks(port, termios);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = pl011_read(uap, REG_CR);
	pl011_write(0, uap, REG_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		uap->autorts = true;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		uap->autorts = false;
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	pl011_write(quot & 0x3f, uap, REG_FBRD);
	pl011_write(quot >> 6, uap, REG_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);
	pl011_write(old_cr, uap, REG_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}
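/*
 * Worked example of the divisor math above (illustrative clock and baud
 * values): with uartclk = 24 MHz and baud = 115200 in the normal 16x
 * sampling case, quot is the divisor expressed in 64ths:
 *
 *	quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200) = 833
 *	IBRD = quot >> 6   = 13		// integer part:
 *					//   24e6 / (16 * 115200) = 13.02
 *	FBRD = quot & 0x3f = 1		// fractional part, in 1/64ths
 */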
static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		      struct ktermios *old)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned long flags;

	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

	/* The SBSA UART only supports 8n1 without hardware flow control. */
	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
	termios->c_cflag |= CS8 | CLOCAL;

	spin_lock_irqsave(&port->lock, flags);
	uart_update_timeout(port, CS8, uap->fixed_baud);
	pl011_setup_status_masks(port, termios);
	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}

/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}

static struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};
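
/*
 * The SBSA generic UART exposes no modem control inputs or outputs, so
 * the mctrl callbacks below are deliberate no-ops.
 */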
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}

static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
}

static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR, then disable auto flow control and make
	 * sure the UART and its transmitter are enabled.
	 */
	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		new_cr = old_cr & ~UART011_CR_CTSEN;
		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
		pl011_write(new_cr, uap, REG_CR);
	}

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore the CR
	 */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}

static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			  int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (pl011_read(uap, REG_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}
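
/*
 * The baud recovery above is simply the inverse of the divisor programming
 * in pl011_set_termios(): baud = uartclk * 4 / (64 * ibrd + fbrd).
 * Continuing the illustrative 24 MHz example, ibrd = 13 and fbrd = 1 give
 * 96000000 / 833 ~= 115246, i.e. the originally requested 115200 to within
 * about 0.04%.
 */
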
static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)

/*
 * The earlycon uart_port is embedded in an earlycon_device, not in a
 * uart_amba_port, so container_of() must not be used here and only the
 * generic PL011 register/flag definitions may be relied upon.
 */
static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
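
/*
 * For illustration only: with the declaration above, early console output
 * can be requested before this driver probes, either via the device tree
 * stdout-path (bare "earlycon" on the command line) or with an explicit,
 * board-specific MMIO address, e.g.:
 *
 *	earlycon=pl011,0x09000000
 *
 * (0x09000000 is an assumed example address, not something this driver
 * defines.)
 */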

#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};

static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev,
			 "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}

/* unregisters the driver also if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}

static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			return i;

	return -EBUSY;
}

static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	index = pl011_probe_dt_alias(index, dev);

	uap->old_cr = 0;
	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	amba_ports[index] = uap;

	return 0;
}

static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}
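
/*
 * Probe path for true AMBA/PrimeCell devices: grab a free line, pick up
 * the vendor data attached to the matching amba_id, then map the device
 * and hand the port over to the serial core.
 */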
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}

static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
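
/*
 * The SBSA probe below insists on a "current-speed" property when it is
 * instantiated from the device tree.  A minimal, purely illustrative node
 * (address and interrupt specifier are assumptions, not taken from this
 * driver) might look like:
 *
 *	uart@9000000 {
 *		compatible = "arm,sbsa-uart";
 *		reg = <0x09000000 0x1000>;
 *		interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
 *		current-speed = <115200>;
 *	};
 */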
static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can easily exit with the error.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot obtain irq\n");
		return ret;
	}
	uap->port.irq	= ret;

	uap->reg_offset	= vendor_sbsa.reg_offset;
	uap->vendor	= &vendor_sbsa;
	uap->fifosize	= 32;
	uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops	= &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}

static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);

static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe		= sbsa_uart_probe,
	.remove		= sbsa_uart_remove,
	.driver	= {
		.name	= "sbsa-uart",
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
	},
};

static struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if builtin it's most likely the console
 * So let's leave module_exit but move module_init to an earlier place
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");