
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports. They
 * have a lot of 16550-like features, but are not register compatible.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>

	/* The size of the array - must be last */

	struct clk *clk;
	unsigned int fifosize;		/* vendor-specific */
	unsigned int fixed_baud;	/* vendor-set fixed baud rate */

/* pl011_reg_to_offset() */
	return uap->reg_offset[reg];

/* pl011_read() */
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?

/* pl011_write() */
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
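/*
 * The two truncated bodies above dispatch on uap->port.iotype. A minimal
 * sketch of how such accessors are typically completed (the relaxed
 * readl/readw pairing is an assumption inferred from the UPIO_MEM32
 * check, not quoted from this excerpt):
 */
static unsigned int pl011_read(const struct uart_amba_port *uap, unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	/* 32-bit regions use word accesses; classic PL011 registers are 16 bits wide */
	return (uap->port.iotype == UPIO_MEM32) ?
	       readl_relaxed(addr) : readw_relaxed(addr);
}

static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
			unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}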
/* pl011_fifo_to_tty() */
	uap->port.icount.rx++;

	uap->port.icount.brk++;
	if (uart_handle_break(&uap->port))

	uap->port.icount.parity++;
	uap->port.icount.frame++;

	uap->port.icount.overrun++;

	ch &= uap->port.read_status_mask;

	sysrq = uart_prepare_sysrq_char(&uap->port, ch & 255);
	uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
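/*
 * For reference, the error counters above decode the receive-status bits
 * that the PL011 places in bits 11:8 of UARTDR (per the ARM PL011 TRM);
 * these masks mirror the definitions in <linux/amba/serial.h>:
 *
 *	UART011_DR_OE	(1 << 11)	overrun error
 *	UART011_DR_BE	(1 << 10)	break error
 *	UART011_DR_PE	(1 << 9)	parity error
 *	UART011_DR_FE	(1 << 8)	framing error
 */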
/* pl011_dmabuf_init() */
	db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
				     &db->dma, GFP_KERNEL);
	if (!db->buf)
		return -ENOMEM;
	db->len = PL011_DMA_BUFFER_SIZE;

/* pl011_dmabuf_free() */
	if (db->buf) {
		dma_free_coherent(chan->device->dev,
				  PL011_DMA_BUFFER_SIZE, db->buf, db->dma);

/* pl011_dma_probe() */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;

	.dst_addr = uap->port.mapbase +
	.dst_maxburst = uap->fifosize >> 1,

	uap->dma_probed = true;
	chan = dma_request_chan(dev, "tx");

	if (PTR_ERR(chan) == -EPROBE_DEFER) {
		uap->dma_probed = false;

	if (!plat || !plat->dma_filter) {
		dev_dbg(uap->port.dev, "no DMA platform data\n");

	/* Try to acquire a generic DMA engine slave TX channel */
	chan = dma_request_channel(mask, plat->dma_filter,
				   plat->dma_tx_param);

		dev_err(uap->port.dev, "no TX DMA channel!\n");

	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	if (IS_ERR(chan) && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

			dev_err(uap->port.dev, "no RX DMA channel!\n");

	.src_addr = uap->port.mapbase +
	.src_maxburst = uap->fifosize >> 2,

		dev_info(uap->port.dev,
			 "RX DMA disabled - no residue processing\n");

	uap->dmarx.chan = chan;

	uap->dmarx.auto_poll_rate = false;
	if (plat && plat->dma_rx_poll_enable) {

		if (plat->dma_rx_poll_rate) {
			uap->dmarx.auto_poll_rate = false;
			uap->dmarx.poll_rate = plat->dma_rx_poll_rate;

			uap->dmarx.auto_poll_rate = true;
			uap->dmarx.poll_rate = 100;
		if (plat->dma_rx_poll_timeout)
			uap->dmarx.poll_timeout =
				plat->dma_rx_poll_timeout;
		else
			uap->dmarx.poll_timeout = 3000;
	} else if (!plat && dev->of_node) {
		uap->dmarx.auto_poll_rate =
			of_property_read_bool(dev->of_node, "auto-poll");
		if (uap->dmarx.auto_poll_rate) {
			if (of_property_read_u32(dev->of_node, "poll-rate-ms", &x) == 0)
				uap->dmarx.poll_rate = x;
			else
				uap->dmarx.poll_rate = 100;
			if (of_property_read_u32(dev->of_node, "poll-timeout-ms", &x) == 0)
				uap->dmarx.poll_timeout = x;
			else
				uap->dmarx.poll_timeout = 3000;

	dev_info(uap->port.dev, "DMA channel RX %s\n",
		 dma_chan_name(uap->dmarx.chan));
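/*
 * A hypothetical device-tree fragment exercising the RX poll properties
 * parsed above ("auto-poll", "poll-rate-ms", "poll-timeout-ms"); the node
 * name, address, and values are illustrative only:
 *
 *	serial@101f1000 {
 *		compatible = "arm,pl011", "arm,primecell";
 *		reg = <0x101f1000 0x1000>;
 *		auto-poll;
 *		poll-rate-ms = <100>;
 *		poll-timeout-ms = <3000>;
 *	};
 */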
/* pl011_dma_remove() */
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);

/* pl011_dma_tx_callback(): the current DMA TX buffer has been sent */
	struct tty_port *tport = &uap->port.state->port;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;

	uart_port_lock_irqsave(&uap->port, &flags);
	if (uap->dmatx.queued)
		dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
				 dmatx->len, DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (e.g. XOFF received, or we want to send an X-char)
	 * ...
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the TX queued status to
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    kfifo_is_empty(&tport->xmit_fifo)) {
		uap->dmatx.queued = false;
		uart_port_unlock_irqrestore(&uap->port, flags);

	/* ... have data pending to be sent. Re-enable the TX IRQ. */
	uart_port_unlock_irqrestore(&uap->port, flags);
/*
 * pl011_dma_tx_refill(): try to refill the TX DMA buffer.
 * Returns 1 if we queued up a TX DMA buffer.
 */
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct tty_port *tport = &uap->port.state->port;

	count = kfifo_len(&tport->xmit_fifo);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;

	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	count = kfifo_out_peek(&tport->xmit_fifo, dmatx->buf, count);
	dmatx->len = count;
	dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
	if (dmatx->dma == DMA_MAPPING_ERROR) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;

	desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
		dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
		uap->dmatx.queued = false;

		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;

	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	uart_xmit_advance(&uap->port, count);

	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

/* pl011_dma_tx_irq(): we received a transmit interrupt without a pending X-char but with ... */
	if (!uap->using_tx_dma)

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);

/* pl011_dma_tx_stop() */
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);

/* pl011_dma_tx_start(): returns false if we want the TX IRQ to be enabled */
	if (!uap->using_tx_dma)

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */

		if (!uap->dmatx.queued) {
			uap->im &= ~UART011_TXIM;
			pl011_write(uap->im, uap, REG_IMSC);

		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * We have an X-char to send. Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/* ... loaded the character, we should just re-enable DMA. */
	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;

/* pl011_dma_flush_buffer() */
	__releases(&uap->port.lock)
	__acquires(&uap->port.lock)

	if (!uap->using_tx_dma)

	dmaengine_terminate_async(uap->dmatx.chan);

	if (uap->dmatx.queued) {
		dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
				 uap->dmatx.len, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
/* pl011_dma_rx_trigger_dma() */
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;

		return -EIO;

	dbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,

		uap->dmarx.running = false;

		return -EBUSY;

	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

/* pl011_dma_rx_chars(): called with the port spinlock uap->port.lock held */
	struct tty_port *port = &uap->port.state->port;
		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;

	struct pl011_dmarx_data *dmarx = &uap->dmarx;

	if (uap->dmarx.poll_rate) {
		dmataken = dbuf->len - dmarx->last_residue;
		pending -= dmataken;

	dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, pending);

	uap->port.icount.rx += dma_count;
		dev_warn(uap->port.dev,

	if (uap->dmarx.poll_rate)
		dmarx->last_residue = dbuf->len;

	dev_vdbg(uap->port.dev,

/* pl011_dma_rx_irq() */
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
		&dmarx->dbuf_b : &dmarx->dbuf_a;

		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = dbuf->len - state.residue;

	/* Then we terminate the transfer - we now know our residue */

	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;

		dev_dbg(uap->port.dev,
	uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

/* pl011_dma_rx_callback() */
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
		&dmarx->dbuf_b : &dmarx->dbuf_a;

	uart_port_lock_irq(&uap->port);

	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = dbuf->len - state.residue;

	/* Then we terminate the transfer - we now know our residue */

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;

	uart_unlock_and_check_sysrq(&uap->port);

		dev_dbg(uap->port.dev,
	uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

/* pl011_dma_rx_stop() */
	if (!uap->using_rx_dma)

	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

/* pl011_dma_rx_poll() */
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;

	dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = dbuf->len - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,

		dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;

	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
	    > uap->dmarx.poll_timeout) {
		uart_port_lock_irqsave(&uap->port, &flags);

		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		uart_port_unlock_irqrestore(&uap->port, flags);

		uap->dmarx.running = false;

		timer_delete(&uap->dmarx.timer);

		mod_timer(&uap->dmarx.timer,
			  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
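/*
 * Residue bookkeeping example for the poll path above (numbers are
 * illustrative): with a 4096-byte DMA buffer, last_residue starts at
 * 4096. If a poll reads state.residue == 4000, then
 * dmataken = 4096 - 4096 = 0 bytes were already pushed to the tty and
 * size = 4096 - 4000 = 96 new bytes get inserted; last_residue is then
 * updated to 4000 for the next poll.
 */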
/* pl011_dma_startup() */
	if (!uap->dma_probed)

	if (!uap->dmatx.chan)

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		uap->port.fifosize = uap->fifosize;

	uap->dmatx.len = PL011_DMA_BUFFER_SIZE;

	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)

	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",

	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,

	uap->using_rx_dma = true;

	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (uap->vendor->dma_threshold)

	if (uap->using_rx_dma) {
		dev_dbg(uap->port.dev,
		if (uap->dmarx.poll_rate) {
			timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
			mod_timer(&uap->dmarx.timer,
				  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;

/* pl011_dma_shutdown() */
	if (!(uap->using_tx_dma || uap->using_rx_dma))

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)

	uart_port_lock_irq(&uap->port);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uart_port_unlock_irq(&uap->port);

	if (uap->using_tx_dma) {
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_single(uap->dmatx.chan->device->dev,
					 uap->dmatx.dma, uap->dmatx.len,
			uap->dmatx.queued = false;

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			timer_delete_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;

/* pl011_dma_rx_available() */
	return uap->using_rx_dma;

/* pl011_dma_rx_running() */
	return uap->using_rx_dma && uap->dmarx.running;

/* pl011_dma_rx_trigger_dma() */
	return -EIO;
/* pl011_rs485_tx_stop() */
	struct uart_port *port = &uap->port;

	if (uap->rs485_tx_state == SEND)
		uap->rs485_tx_state = WAIT_AFTER_SEND;

	if (uap->rs485_tx_state == WAIT_AFTER_SEND) {
		/* Schedule hrtimer if tx queue not empty */
			hrtimer_start(&uap->trigger_stop_tx,
				      uap->rs485_tx_drain_interval,

		if (port->rs485.delay_rts_after_send > 0) {
			hrtimer_start(&uap->trigger_stop_tx,
				      ms_to_ktime(port->rs485.delay_rts_after_send),

	} else if (uap->rs485_tx_state == WAIT_AFTER_RTS) {
		hrtimer_try_to_cancel(&uap->trigger_start_tx);

	if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)

	uap->rs485_tx_state = OFF;

/* pl011_stop_tx() */
	if (port->rs485.flags & SER_RS485_ENABLED &&
	    uap->rs485_tx_state == WAIT_AFTER_RTS) {

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	if (port->rs485.flags & SER_RS485_ENABLED &&
	    uap->rs485_tx_state != OFF)

/* pl011_start_tx_pio(): start TX with programmed I/O only (no DMA) */
	uap->im |= UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);

/* pl011_rs485_tx_start() */
	struct uart_port *port = &uap->port;

	if (uap->rs485_tx_state == WAIT_AFTER_RTS) {
		uap->rs485_tx_state = SEND;

	if (uap->rs485_tx_state == WAIT_AFTER_SEND) {
		hrtimer_try_to_cancel(&uap->trigger_stop_tx);
		uap->rs485_tx_state = SEND;

	/* uap->rs485_tx_state == OFF */

	/* Disable receiver if half-duplex */
	if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))

	if (port->rs485.flags & SER_RS485_RTS_ON_SEND)

	if (port->rs485.delay_rts_before_send > 0) {
		uap->rs485_tx_state = WAIT_AFTER_RTS;
		hrtimer_start(&uap->trigger_start_tx,
			      ms_to_ktime(port->rs485.delay_rts_before_send),

		uap->rs485_tx_state = SEND;

/* pl011_start_tx() */
	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
	    uap->rs485_tx_state != SEND) {
		if (uap->rs485_tx_state == WAIT_AFTER_RTS)

/* pl011_trigger_start_tx() */
	uart_port_lock_irqsave(&uap->port, &flags);
	if (uap->rs485_tx_state == WAIT_AFTER_RTS)
		pl011_start_tx(&uap->port);
	uart_port_unlock_irqrestore(&uap->port, flags);

/* pl011_trigger_stop_tx() */
	uart_port_lock_irqsave(&uap->port, &flags);
	if (uap->rs485_tx_state == WAIT_AFTER_SEND)
	uart_port_unlock_irqrestore(&uap->port, flags);
/* pl011_stop_rx() */
	uap->im &= ~(UART011_RXIM | UART011_RTIM | UART011_FEIM |
	pl011_write(uap->im, uap, REG_IMSC);

/* pl011_enable_ms() */
	uap->im |= UART011_RIMIM | UART011_CTSMIM | UART011_DCDMIM | UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);

/* pl011_rx_chars() */
	__releases(&uap->port.lock)
	__acquires(&uap->port.lock)

	uart_port_unlock(&uap->port);
	tty_flip_buffer_push(&uap->port.state->port);

		dev_dbg(uap->port.dev,
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);

		if (uap->dmarx.poll_rate) {
			uap->dmarx.last_jiffies = jiffies;
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			mod_timer(&uap->dmarx.timer,
				  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));

	uart_port_lock(&uap->port);

/* pl011_tx_char() */
	uap->port.icount.tx++;

/* pl011_tx_chars(): returns true if TX interrupts have to be (kept) enabled */
	struct tty_port *tport = &uap->port.state->port;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
		uap->port.x_char = 0;
		--count;
	if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);

		if (likely(from_irq) && count-- == 0)

		if (!kfifo_peek(&tport->xmit_fifo, &c))

		kfifo_skip(&tport->xmit_fifo);

	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (kfifo_is_empty(&tport->xmit_fifo)) {
		pl011_stop_tx(&uap->port);

/* pl011_modem_status() */
	delta = status ^ uap->old_status;
	uap->old_status = status;

		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
/* check_apply_cts_event_workaround() */
	if (!uap->vendor->cts_event_workaround)

	/* WA: introduce 26 ns (1 UART clk) delay before W1C; ... */
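/*
 * A plausible completion of the workaround above: the delay is commonly
 * realized with dummy APB reads before the write-one-to-clear. The
 * register choice and read count here are assumptions, not verbatim
 * from this excerpt:
 */
	unsigned int dummy_read;

	/* each APB access takes a couple of PCLK cycles, so two reads suffice */
	dummy_read = pl011_read(uap, REG_ICR);
	dummy_read = pl011_read(uap, REG_ICR);
	(void)dummy_read;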
/* pl011_int() */
	uart_port_lock(&uap->port);
	status = pl011_read(uap, REG_RIS) & uap->im;

		if (pass_counter-- == 0)

		status = pl011_read(uap, REG_RIS) & uap->im;

	uart_unlock_and_check_sysrq(&uap->port);

/* pl011_tx_empty() */
	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?

/* pl011_get_mctrl() */
	pl011_maybe_set_bit(status & uap->vendor->fr_dsr, &result, TIOCM_DSR);
	pl011_maybe_set_bit(status & uap->vendor->fr_cts, &result, TIOCM_CTS);
	pl011_maybe_set_bit(status & uap->vendor->fr_ri, &result, TIOCM_RNG);
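/*
 * pl011_maybe_set_bit() above is a small conditional-OR helper; a minimal
 * sketch consistent with its call sites (the signature is inferred, not
 * quoted from this excerpt):
 */
static void pl011_maybe_set_bit(bool condition, unsigned int *ptr, unsigned int mask)
{
	if (condition)
		*ptr |= mask;
}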
/* pl011_set_mctrl() */
	if (port->status & UPSTAT_AUTORTS) {
		/* We need to disable auto-RTS if we want to turn RTS off */

/* pl011_break_ctl() */
	uart_port_lock_irqsave(&uap->port, &flags);

	if (break_state == -1)

	uart_port_unlock_irqrestore(&uap->port, flags);

/* pl011_quiesce_irqs() */
	/* ... (including tx queue), so we're also fine with start_tx()'s caller ... */

/* pl011_hwinit() */
	pinctrl_pm_select_default_state(port->dev);

	retval = clk_prepare_enable(uap->clk);

	uap->port.uartclk = clk_get_rate(uap->clk);

	uap->im = pl011_read(uap, REG_IMSC);

	if (dev_get_platdata(uap->port.dev)) {

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
/* pl011_write_lcr_h() */
	/*
	 * Wait 10 PCLKs before writing the LCRH_TX register; to get this
	 * delay, write a read-only register 10 times.
	 */
	for (i = 0; i < 10; ++i)
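/*
 * Sketch of the delay idiom the comment above describes: ten dummy
 * accesses to a read-only register let at least 10 PCLKs elapse before
 * LCRH_TX is written. Using REG_MIS as the dummy target is an assumption,
 * not verbatim from this excerpt:
 */
	for (i = 0; i < 10; ++i)
		pl011_write(0xff, uap, REG_MIS);	/* write to a RO reg: harmless bus cycle */
	pl011_write(lcr_h, uap, REG_LCRH_TX);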
/* pl011_allocate_irq() */
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);

/* pl011_enable_interrupts() */
	uart_port_lock_irqsave(&uap->port, &flags);

	for (i = 0; i < uap->fifosize * 2; ++i) {

	uap->im = UART011_RTIM;
		uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	uart_port_unlock_irqrestore(&uap->port, flags);

/* pl011_unthrottle_rx() */
	uart_port_lock_irqsave(&uap->port, &flags);

	uap->im = UART011_RTIM;
		uap->im |= UART011_RXIM;

	pl011_write(uap->im, uap, REG_IMSC);

	if (uap->using_rx_dma) {
		uap->dmacr |= UART011_RXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);

	uart_port_unlock_irqrestore(&uap->port, flags);

/* pl011_startup() */
	pl011_write(uap->vendor->ifls, uap, REG_IFLS);

	uart_port_lock_irq(&uap->port);

	if (!(port->rs485.flags & SER_RS485_ENABLED))

	uart_port_unlock_irq(&uap->port);

	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	clk_disable_unprepare(uap->clk);

/* sbsa_uart_startup() */
	uap->old_status = 0;

/* pl011_disable_uart() */
	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	uart_port_lock_irq(&uap->port);

	uart_port_unlock_irq(&uap->port);

/* pl011_disable_interrupts() */
	uart_port_lock_irq(&uap->port);

	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);

	uart_port_unlock_irq(&uap->port);

/* pl011_shutdown() */
	if ((port->rs485.flags & SER_RS485_ENABLED && uap->rs485_tx_state != OFF))

	free_irq(uap->port.irq, uap);

	clk_disable_unprepare(uap->clk);

	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);

/* sbsa_uart_shutdown() */
	free_irq(uap->port.irq, uap);

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);

/* pl011_setup_status_masks() */
	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;

		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;

	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
/* pl011_set_termios() */
	if (uap->vendor->oversampling)

		port->uartclk / clkdiv);

	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);

	if (baud > port->uartclk / 16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
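/*
 * Worked example of the divisor math above (illustrative values): the
 * PL011 divisor is uartclk / (16 * baud), carried with a 6-bit fraction,
 * so quot = uartclk * 4 / baud (or * 8 when the vendor oversamples by 8
 * rather than 16). With uartclk = 24 MHz and baud = 115200:
 *
 *	quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200) = 833
 *	ibrd = quot >> 6   = 13
 *	fbrd = quot & 0x3f = 1
 *
 * i.e. a divisor of 13 + 1/64, about 13.02, matching the reverse
 * computation used for the console:
 * baud = uartclk * 4 / (64 * ibrd + fbrd).
 */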
	switch (termios->c_cflag & CSIZE) {

	if (termios->c_cflag & CSTOPB)
	if (termios->c_cflag & PARENB) {
		if (!(termios->c_cflag & PARODD))
		if (termios->c_cflag & CMSPAR)
	if (uap->fifosize > 1)

	bits = tty_get_frame_size(termios->c_cflag);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/* ... wait for the tx queue to empty. */
	uap->rs485_tx_drain_interval = ns_to_ktime(DIV_ROUND_UP(bits * NSEC_PER_SEC, baud));
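/*
 * Example of the drain-interval arithmetic above: with 8N1 framing,
 * tty_get_frame_size() reports 10 bits per character (start + 8 data +
 * stop), so at 115200 baud one frame takes
 * DIV_ROUND_UP(10 * NSEC_PER_SEC, 115200) = 86806 ns, roughly 87 us.
 */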
	if (UART_ENABLE_MS(port, termios->c_cflag))

	if (port->rs485.flags & SER_RS485_ENABLED)
		termios->c_cflag &= ~CRTSCTS;

	if (termios->c_cflag & CRTSCTS) {

		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;

		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)

	if (uap->vendor->oversampling) {

			quot -= 1;

			quot -= 2;
/* sbsa_uart_set_termios() */
	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
	termios->c_cflag |= CS8 | CLOCAL;

	uart_update_timeout(port, CS8, uap->fixed_baud);

/* pl011_type() */
	return uap->port.type == PORT_AMBA ? uap->type : NULL;

/* pl011_config_port() */
		port->type = PORT_AMBA;

/* pl011_verify_port() */
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs())
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	if (port->mapbase != (unsigned long)ser->iomem_base)
		ret = -EINVAL;

/* pl011_rs485_config() */
	if (port->rs485.flags & SER_RS485_ENABLED)

	if (rs485->flags & SER_RS485_ENABLED) {

		port->status &= ~UPSTAT_AUTORTS;

/* pl011_console_putchar() */
	uap->console_line_ended = (ch == '\n');

/* pl011_console_get_options() */
		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling &&

/* pl011_console_setup() */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];

		return -ENODEV;

	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);

	uap->console_line_ended = true;

	if (dev_get_platdata(uap->port.dev)) {

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);

/*
 * pl011_console_match - non-standard console matching
 *
 * Returns 0 if console matches; otherwise non-zero to use default matching
 */
		return -ENODEV;

		return -ENODEV;

		return -ENODEV;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)

		co->index = i;

	return -ENODEV;

/* pl011_console_write_atomic() */
	struct uart_amba_port *uap = amba_ports[co->index];

	clk_enable(uap->clk);

	if (!uap->vendor->always_enabled) {

	if (!uap->console_line_ended)
		uart_console_write(&uap->port, "\n", 1, pl011_console_putchar);
	uart_console_write(&uap->port, wctxt->outbuf, wctxt->len, pl011_console_putchar);

	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy)

	if (!uap->vendor->always_enabled)

	clk_disable(uap->clk);

/* pl011_console_write_thread() */
	struct uart_amba_port *uap = amba_ports[co->index];

	clk_enable(uap->clk);

	if (!uap->vendor->always_enabled) {

		unsigned int len = READ_ONCE(wctxt->len);

			uart_console_write(&uap->port, wctxt->outbuf + i, 1, pl011_console_putchar);

	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy)

	if (!uap->vendor->always_enabled)

	clk_disable(uap->clk);

/* pl011_console_device_lock() */
	__uart_port_lock_irqsave(&amba_ports[co->index]->port, flags);

/* pl011_console_device_unlock() */
	__uart_port_unlock_irqrestore(&amba_ports[co->index]->port, flags);

	.index		= -1,

/* qdf2400_e44_putc() */
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)

	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))

/* qdf2400_e44_early_write() */
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
/* pl011_putc() */
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)

	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)

/* pl011_early_write() */
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);

/* pl011_getc() */
	if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)

	if (port->iotype == UPIO_MEM32)
		return readl(port->membase + UART01x_DR);
	else
		return readb(port->membase + UART01x_DR);
/* pl011_early_read() */
	struct earlycon_device *dev = con->data;

		ch = pl011_getc(&dev->port);

/*
 * On non-ACPI systems, earlycon is enabled by specifying ...
 */
/* pl011_early_console_setup() */
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	device->con->read = pl011_early_read;

OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
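/*
 * Usage note (illustrative): on DT systems the early console above is
 * typically enabled from the kernel command line, e.g.
 *
 *	earlycon=pl011,0x9000000
 *
 * where the MMIO address is board-specific; the value shown is the
 * conventional QEMU "virt" PL011 address, given only as an example.
 */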
/* ... case, the SPCR code will detect the need for the E44 work-around ... */
/* qdf2400_e44_early_console_setup() */
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;

/* pl011_probe_dt_alias() */
	np = dev->of_node;

		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeratio…

/* pl011_find_free_port() */
	return -EBUSY;

/* pl011_setup_port() */
	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	ret = uart_get_rs485_mode(&uap->port);

/* pl011_register_port() */
		dev_err(uap->port.dev,
			"Failed to register AMBA-PL011 driver\n");

	ret = uart_add_one_port(&amba_reg, &uap->port);

/* pl011_probe() */
	struct vendor_data *vendor = id->data;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;
	uap->port.rs485_config = pl011_rs485_config;
	uap->port.rs485_supported = pl011_rs485_supported;
	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) {

			uap->port.iotype = UPIO_MEM;

			uap->port.iotype = UPIO_MEM32;

			dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n",
			return -EINVAL;

	hrtimer_setup(&uap->trigger_start_tx, pl011_trigger_start_tx, CLOCK_MONOTONIC,
	hrtimer_setup(&uap->trigger_stop_tx, pl011_trigger_stop_tx, CLOCK_MONOTONIC,

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);

/* pl011_remove() */
	uart_remove_one_port(&amba_reg, &uap->port);

/* pl011_suspend() */
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);

/* pl011_resume() */
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);

/* qpdf2400_erratum44_workaround() */
		uap->vendor = &vendor_qdt_qdf2400_e44;

/* sbsa_uart_probe() */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
	if (!uap)
		return -ENOMEM;

	uap->port.irq = ret;

	uap->vendor = &vendor_sbsa;
	qpdf2400_erratum44_workaround(&pdev->dev, uap);

	uap->reg_offset = uap->vendor->reg_offset;
	uap->fifosize = 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops = &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);

/* sbsa_uart_remove() */
	uart_remove_one_port(&amba_reg, &uap->port);

	{ .compatible = "arm,sbsa-uart", },

	.name = "sbsa-uart",

	.name = "uart-pl011",