Lines Matching +full:has +full:-builtin +full:-dma

1 // SPDX-License-Identifier: GPL-2.0+
9 * Copyright (C) 2010 ST-Ericsson SA
11 * This is a generic driver for ARM AMBA-type serial ports. They
12 * have a lot of 16550-like features, but are not register compatible.
35 #include <linux/dma-mapping.h>
82 /* The size of the array - must be last */
219 /* Deals with DMA transactions */
222 dma_addr_t dma; member
245 dma_addr_t dma; member
261 unsigned int fifosize; /* vendor-specific */
262 unsigned int fixed_baud; /* vendor-set fixed baud rate */
267 /* DMA stuff */
268 unsigned int dmacr; /* dma control reg */
282 return uap->reg_offset[reg]; in pl011_reg_to_offset()
288 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); in pl011_read()
290 return (uap->port.iotype == UPIO_MEM32) ? in pl011_read()
297 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); in pl011_write()
299 if (uap->port.iotype == UPIO_MEM32) in pl011_write()
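
Assembled from the matches above, the two MMIO accessors plausibly read as follows; a sketch, with the relaxed read/write variants being an assumption since only the UPIO_MEM32 tests appear in the listing:

	static unsigned int pl011_read(const struct uart_amba_port *uap,
				       unsigned int reg)
	{
		void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

		/* UPIO_MEM32 ports use 32-bit accesses, everything else 16-bit */
		return (uap->port.iotype == UPIO_MEM32) ?
			readl_relaxed(addr) : readw_relaxed(addr);
	}

	static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
				unsigned int reg)
	{
		void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

		if (uap->port.iotype == UPIO_MEM32)
			writel_relaxed(val, addr);
		else
			writew_relaxed(val, addr);
	}
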
325 uap->port.icount.rx++; in pl011_fifo_to_tty()
330 uap->port.icount.brk++; in pl011_fifo_to_tty()
331 if (uart_handle_break(&uap->port)) in pl011_fifo_to_tty()
334 uap->port.icount.parity++; in pl011_fifo_to_tty()
336 uap->port.icount.frame++; in pl011_fifo_to_tty()
339 uap->port.icount.overrun++; in pl011_fifo_to_tty()
341 ch &= uap->port.read_status_mask; in pl011_fifo_to_tty()
351 sysrq = uart_prepare_sysrq_char(&uap->port, ch & 255); in pl011_fifo_to_tty()
353 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); in pl011_fifo_to_tty()
360 * All the DMA operation mode stuff goes inside this ifdef.
361 * This assumes that you have a generic DMA device interface;
362 * no custom DMA interfaces are supported.
371 db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE, in pl011_dmabuf_init()
372 &db->dma, GFP_KERNEL); in pl011_dmabuf_init()
373 if (!db->buf) in pl011_dmabuf_init()
374 return -ENOMEM; in pl011_dmabuf_init()
375 db->len = PL011_DMA_BUFFER_SIZE; in pl011_dmabuf_init()
383 if (db->buf) { in pl011_dmabuf_free()
384 dma_free_coherent(chan->device->dev, in pl011_dmabuf_free()
385 PL011_DMA_BUFFER_SIZE, db->buf, db->dma); in pl011_dmabuf_free()
391 /* DMA is the sole user of the platform data right now */ in pl011_dma_probe()
392 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev); in pl011_dma_probe()
393 struct device *dev = uap->port.dev; in pl011_dma_probe()
395 .dst_addr = uap->port.mapbase + in pl011_dma_probe()
399 .dst_maxburst = uap->fifosize >> 1, in pl011_dma_probe()
405 uap->dma_probed = true; in pl011_dma_probe()
408 if (PTR_ERR(chan) == -EPROBE_DEFER) { in pl011_dma_probe()
409 uap->dma_probed = false; in pl011_dma_probe()
414 if (!plat || !plat->dma_filter) { in pl011_dma_probe()
415 dev_dbg(uap->port.dev, "no DMA platform data\n"); in pl011_dma_probe()
419 /* Try to acquire a generic DMA engine slave TX channel */ in pl011_dma_probe()
423 chan = dma_request_channel(mask, plat->dma_filter, in pl011_dma_probe()
424 plat->dma_tx_param); in pl011_dma_probe()
426 dev_err(uap->port.dev, "no TX DMA channel!\n"); in pl011_dma_probe()
432 uap->dmatx.chan = chan; in pl011_dma_probe()
434 dev_info(uap->port.dev, "DMA channel TX %s\n", in pl011_dma_probe()
435 dma_chan_name(uap->dmatx.chan)); in pl011_dma_probe()
440 if (IS_ERR(chan) && plat && plat->dma_rx_param) { in pl011_dma_probe()
441 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); in pl011_dma_probe()
444 dev_err(uap->port.dev, "no RX DMA channel!\n"); in pl011_dma_probe()
451 .src_addr = uap->port.mapbase + in pl011_dma_probe()
455 .src_maxburst = uap->fifosize >> 2, in pl011_dma_probe()
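
Gathered from these scattered matches, the two slave configurations plausibly assemble as below, both targeting the data register; a sketch in which the address-width and direction fields are assumptions (only the address and maxburst lines appear above):

	struct dma_slave_config tx_conf = {
		.dst_addr	= uap->port.mapbase +
				  pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,	/* assumed */
		.direction	= DMA_MEM_TO_DEV,		/* assumed */
		.dst_maxburst	= uap->fifosize >> 1,	/* burst at half FIFO */
	};
	struct dma_slave_config rx_conf = {
		.src_addr	= uap->port.mapbase +
				  pl011_reg_to_offset(uap, REG_DR),
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,	/* assumed */
		.direction	= DMA_DEV_TO_MEM,		/* assumed */
		.src_maxburst	= uap->fifosize >> 2,	/* burst at quarter FIFO */
	};

	dmaengine_slave_config(uap->dmatx.chan, &tx_conf);
	dmaengine_slave_config(uap->dmarx.chan, &rx_conf);
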
461 * Some DMA controllers provide information on their capabilities. in pl011_dma_probe()
469 dev_info(uap->port.dev, in pl011_dma_probe()
470 "RX DMA disabled - no residue processing\n"); in pl011_dma_probe()
475 uap->dmarx.chan = chan; in pl011_dma_probe()
477 uap->dmarx.auto_poll_rate = false; in pl011_dma_probe()
478 if (plat && plat->dma_rx_poll_enable) { in pl011_dma_probe()
480 if (plat->dma_rx_poll_rate) { in pl011_dma_probe()
481 uap->dmarx.auto_poll_rate = false; in pl011_dma_probe()
482 uap->dmarx.poll_rate = plat->dma_rx_poll_rate; in pl011_dma_probe()
489 uap->dmarx.auto_poll_rate = true; in pl011_dma_probe()
490 uap->dmarx.poll_rate = 100; in pl011_dma_probe()
493 if (plat->dma_rx_poll_timeout) in pl011_dma_probe()
494 uap->dmarx.poll_timeout = in pl011_dma_probe()
495 plat->dma_rx_poll_timeout; in pl011_dma_probe()
497 uap->dmarx.poll_timeout = 3000; in pl011_dma_probe()
498 } else if (!plat && dev->of_node) { in pl011_dma_probe()
499 uap->dmarx.auto_poll_rate = in pl011_dma_probe()
500 of_property_read_bool(dev->of_node, "auto-poll"); in pl011_dma_probe()
501 if (uap->dmarx.auto_poll_rate) { in pl011_dma_probe()
504 if (of_property_read_u32(dev->of_node, "poll-rate-ms", &x) == 0) in pl011_dma_probe()
505 uap->dmarx.poll_rate = x; in pl011_dma_probe()
507 uap->dmarx.poll_rate = 100; in pl011_dma_probe()
508 if (of_property_read_u32(dev->of_node, "poll-timeout-ms", &x) == 0) in pl011_dma_probe()
509 uap->dmarx.poll_timeout = x; in pl011_dma_probe()
511 uap->dmarx.poll_timeout = 3000; in pl011_dma_probe()
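
For reference, the three devicetree properties consumed here would sit in a board's UART node roughly as below (an illustrative fragment expressed as a comment; the node name, unit address and values are invented, the defaults shown match the fallbacks in the code above):

	/*
	 *	serial@3f201000 {
	 *		compatible = "arm,pl011", "arm,primecell";
	 *		auto-poll;
	 *		poll-rate-ms = <100>;
	 *		poll-timeout-ms = <3000>;
	 *	};
	 */
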
514 dev_info(uap->port.dev, "DMA channel RX %s\n", in pl011_dma_probe()
515 dma_chan_name(uap->dmarx.chan)); in pl011_dma_probe()
521 if (uap->dmatx.chan) in pl011_dma_remove()
522 dma_release_channel(uap->dmatx.chan); in pl011_dma_remove()
523 if (uap->dmarx.chan) in pl011_dma_remove()
524 dma_release_channel(uap->dmarx.chan); in pl011_dma_remove()
532 * The current DMA TX buffer has been sent.
533 * Try to queue up another DMA buffer.
538 struct tty_port *tport = &uap->port.state->port; in pl011_dma_tx_callback()
539 struct pl011_dmatx_data *dmatx = &uap->dmatx; in pl011_dma_tx_callback()
543 uart_port_lock_irqsave(&uap->port, &flags); in pl011_dma_tx_callback()
544 if (uap->dmatx.queued) in pl011_dma_tx_callback()
545 dma_unmap_single(dmatx->chan->device->dev, dmatx->dma, in pl011_dma_tx_callback()
546 dmatx->len, DMA_TO_DEVICE); in pl011_dma_tx_callback()
548 dmacr = uap->dmacr; in pl011_dma_tx_callback()
549 uap->dmacr = dmacr & ~UART011_TXDMAE; in pl011_dma_tx_callback()
550 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_callback()
553 * If TX DMA was disabled, it means that we've stopped the DMA for in pl011_dma_tx_callback()
554 * some reason (e.g. XOFF received, or we want to send an X-char). in pl011_dma_tx_callback()
556 * Note: we need to be careful here of a potential race between DMA in pl011_dma_tx_callback()
557 * and the rest of the driver - if the driver disables TX DMA while in pl011_dma_tx_callback()
561 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) || in pl011_dma_tx_callback()
562 kfifo_is_empty(&tport->xmit_fifo)) { in pl011_dma_tx_callback()
563 uap->dmatx.queued = false; in pl011_dma_tx_callback()
564 uart_port_unlock_irqrestore(&uap->port, flags); in pl011_dma_tx_callback()
570 * We didn't queue a DMA buffer for some reason, but we in pl011_dma_tx_callback()
571 * have data pending to be sent. Re-enable the TX IRQ. in pl011_dma_tx_callback()
575 uart_port_unlock_irqrestore(&uap->port, flags); in pl011_dma_tx_callback()
579 * Try to refill the TX DMA buffer.
582 * 1 if we queued up a TX DMA buffer.
583 * 0 if we didn't want to handle this by DMA
588 struct pl011_dmatx_data *dmatx = &uap->dmatx; in pl011_dma_tx_refill()
589 struct dma_chan *chan = dmatx->chan; in pl011_dma_tx_refill()
590 struct dma_device *dma_dev = chan->device; in pl011_dma_tx_refill()
592 struct tty_port *tport = &uap->port.state->port; in pl011_dma_tx_refill()
596 * Try to avoid the overhead involved in using DMA if the in pl011_dma_tx_refill()
601 count = kfifo_len(&tport->xmit_fifo); in pl011_dma_tx_refill()
602 if (count < (uap->fifosize >> 1)) { in pl011_dma_tx_refill()
603 uap->dmatx.queued = false; in pl011_dma_tx_refill()
608 * Bodge: don't send the last character by DMA, as this in pl011_dma_tx_refill()
609 * will prevent XON from notifying us to restart DMA. in pl011_dma_tx_refill()
611 count -= 1; in pl011_dma_tx_refill()
613 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */ in pl011_dma_tx_refill()
617 count = kfifo_out_peek(&tport->xmit_fifo, dmatx->buf, count); in pl011_dma_tx_refill()
618 dmatx->len = count; in pl011_dma_tx_refill()
619 dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count, in pl011_dma_tx_refill()
621 if (dmatx->dma == DMA_MAPPING_ERROR) { in pl011_dma_tx_refill()
622 uap->dmatx.queued = false; in pl011_dma_tx_refill()
623 dev_dbg(uap->port.dev, "unable to map TX DMA\n"); in pl011_dma_tx_refill()
624 return -EBUSY; in pl011_dma_tx_refill()
627 desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV, in pl011_dma_tx_refill()
630 dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE); in pl011_dma_tx_refill()
631 uap->dmatx.queued = false; in pl011_dma_tx_refill()
633 * If DMA cannot be used right now, we complete this in pl011_dma_tx_refill()
636 dev_dbg(uap->port.dev, "TX DMA busy\n"); in pl011_dma_tx_refill()
637 return -EBUSY; in pl011_dma_tx_refill()
641 desc->callback = pl011_dma_tx_callback; in pl011_dma_tx_refill()
642 desc->callback_param = uap; in pl011_dma_tx_refill()
647 /* Fire the DMA transaction */ in pl011_dma_tx_refill()
648 dma_dev->device_issue_pending(chan); in pl011_dma_tx_refill()
650 uap->dmacr |= UART011_TXDMAE; in pl011_dma_tx_refill()
651 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_refill()
652 uap->dmatx.queued = true; in pl011_dma_tx_refill()
655 * Now we know that DMA will fire, so advance the ring buffer in pl011_dma_tx_refill()
658 uart_xmit_advance(&uap->port, count); in pl011_dma_tx_refill()
660 if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) in pl011_dma_tx_refill()
661 uart_write_wakeup(&uap->port); in pl011_dma_tx_refill()
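
Stripped of its error handling, the refill path above reduces to the canonical dmaengine slave-TX sequence; a condensed sketch of the lines shown (the prep flags and the dmaengine_submit() step are assumptions, as neither appears among the matches):

	dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
				    DMA_TO_DEVICE);		/* map the bounce buffer */
	desc = dmaengine_prep_slave_single(chan, dmatx->dma, count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);	/* build the descriptor */
	desc->callback = pl011_dma_tx_callback;	/* completion handler */
	desc->callback_param = uap;
	dmaengine_submit(desc);			/* queue it on the channel */
	dma_dev->device_issue_pending(chan);	/* kick the engine */
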
667 * We received a transmit interrupt without a pending X-char but with
672 * true if we queued a DMA buffer
676 if (!uap->using_tx_dma) in pl011_dma_tx_irq()
681 * TX interrupt, it will be because we've just sent an X-char. in pl011_dma_tx_irq()
682 * Ensure the TX DMA is enabled and the TX IRQ is disabled. in pl011_dma_tx_irq()
684 if (uap->dmatx.queued) { in pl011_dma_tx_irq()
685 uap->dmacr |= UART011_TXDMAE; in pl011_dma_tx_irq()
686 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_irq()
687 uap->im &= ~UART011_TXIM; in pl011_dma_tx_irq()
688 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_tx_irq()
697 uap->im &= ~UART011_TXIM; in pl011_dma_tx_irq()
698 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_tx_irq()
705 * Stop the DMA transmit (e.g. due to a received XOFF).
710 if (uap->dmatx.queued) { in pl011_dma_tx_stop()
711 uap->dmacr &= ~UART011_TXDMAE; in pl011_dma_tx_stop()
712 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_stop()
717 * Try to start a DMA transmit, or in the case of an XON/OFF
728 if (!uap->using_tx_dma) in pl011_dma_tx_start()
731 if (!uap->port.x_char) { in pl011_dma_tx_start()
732 /* no X-char, try to push chars out in DMA mode */ in pl011_dma_tx_start()
735 if (!uap->dmatx.queued) { in pl011_dma_tx_start()
737 uap->im &= ~UART011_TXIM; in pl011_dma_tx_start()
738 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_tx_start()
742 } else if (!(uap->dmacr & UART011_TXDMAE)) { in pl011_dma_tx_start()
743 uap->dmacr |= UART011_TXDMAE; in pl011_dma_tx_start()
744 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_start()
750 * We have an X-char to send. Disable DMA to prevent it loading in pl011_dma_tx_start()
753 dmacr = uap->dmacr; in pl011_dma_tx_start()
754 uap->dmacr &= ~UART011_TXDMAE; in pl011_dma_tx_start()
755 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_start()
761 * loaded the character, we should just re-enable DMA. in pl011_dma_tx_start()
766 pl011_write(uap->port.x_char, uap, REG_DR); in pl011_dma_tx_start()
767 uap->port.icount.tx++; in pl011_dma_tx_start()
768 uap->port.x_char = 0; in pl011_dma_tx_start()
770 /* Success - restore the DMA state */ in pl011_dma_tx_start()
771 uap->dmacr = dmacr; in pl011_dma_tx_start()
782 __releases(&uap->port.lock) in pl011_dma_flush_buffer()
783 __acquires(&uap->port.lock) in pl011_dma_flush_buffer()
788 if (!uap->using_tx_dma) in pl011_dma_flush_buffer()
791 dmaengine_terminate_async(uap->dmatx.chan); in pl011_dma_flush_buffer()
793 if (uap->dmatx.queued) { in pl011_dma_flush_buffer()
794 dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma, in pl011_dma_flush_buffer()
795 uap->dmatx.len, DMA_TO_DEVICE); in pl011_dma_flush_buffer()
796 uap->dmatx.queued = false; in pl011_dma_flush_buffer()
797 uap->dmacr &= ~UART011_TXDMAE; in pl011_dma_flush_buffer()
798 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_flush_buffer()
806 struct dma_chan *rxchan = uap->dmarx.chan; in pl011_dma_rx_trigger_dma()
807 struct pl011_dmarx_data *dmarx = &uap->dmarx; in pl011_dma_rx_trigger_dma()
812 return -EIO; in pl011_dma_rx_trigger_dma()
814 /* Start the RX DMA job */ in pl011_dma_rx_trigger_dma()
815 dbuf = uap->dmarx.use_buf_b ? in pl011_dma_rx_trigger_dma()
816 &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; in pl011_dma_rx_trigger_dma()
817 desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len, in pl011_dma_rx_trigger_dma()
821 * If the DMA engine is busy and cannot prepare a in pl011_dma_rx_trigger_dma()
826 uap->dmarx.running = false; in pl011_dma_rx_trigger_dma()
828 return -EBUSY; in pl011_dma_rx_trigger_dma()
832 desc->callback = pl011_dma_rx_callback; in pl011_dma_rx_trigger_dma()
833 desc->callback_param = uap; in pl011_dma_rx_trigger_dma()
834 dmarx->cookie = dmaengine_submit(desc); in pl011_dma_rx_trigger_dma()
837 uap->dmacr |= UART011_RXDMAE; in pl011_dma_rx_trigger_dma()
838 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_rx_trigger_dma()
839 uap->dmarx.running = true; in pl011_dma_rx_trigger_dma()
841 uap->im &= ~UART011_RXIM; in pl011_dma_rx_trigger_dma()
842 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_rx_trigger_dma()
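
The truncated dmaengine_prep_slave_single() call above plausibly completes with the device-to-memory direction, mirroring the TX path; the flags are an assumption:

	desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
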
848 * This is called when either the DMA job is complete, or
850 * with the port spinlock uap->port.lock held.
856 struct tty_port *port = &uap->port.state->port; in pl011_dma_rx_chars()
858 &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; in pl011_dma_rx_chars()
862 struct pl011_dmarx_data *dmarx = &uap->dmarx; in pl011_dma_rx_chars()
865 if (uap->dmarx.poll_rate) { in pl011_dma_rx_chars()
867 dmataken = dbuf->len - dmarx->last_residue; in pl011_dma_rx_chars()
870 pending -= dmataken; in pl011_dma_rx_chars()
873 /* Pick the remaining data from the DMA */ in pl011_dma_rx_chars()
876 * First take all chars in the DMA pipe, then look in the FIFO. in pl011_dma_rx_chars()
880 dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, pending); in pl011_dma_rx_chars()
882 uap->port.icount.rx += dma_count; in pl011_dma_rx_chars()
884 dev_warn(uap->port.dev, in pl011_dma_rx_chars()
888 /* Reset the last_residue for Rx DMA poll */ in pl011_dma_rx_chars()
889 if (uap->dmarx.poll_rate) in pl011_dma_rx_chars()
890 dmarx->last_residue = dbuf->len; in pl011_dma_rx_chars()
893 * Only continue with trying to read the FIFO if all DMA chars have in pl011_dma_rx_chars()
902 * If we read all the DMA'd characters, and we had an in pl011_dma_rx_chars()
908 * trigger an immediate interrupt and stop the DMA job, so we in pl011_dma_rx_chars()
909 * will always find the error in the FIFO, never in the DMA in pl011_dma_rx_chars()
915 dev_vdbg(uap->port.dev, in pl011_dma_rx_chars()
916 "Took %d chars from DMA buffer and %d chars from the FIFO\n", in pl011_dma_rx_chars()
923 struct pl011_dmarx_data *dmarx = &uap->dmarx; in pl011_dma_rx_irq()
924 struct dma_chan *rxchan = dmarx->chan; in pl011_dma_rx_irq()
925 struct pl011_dmabuf *dbuf = dmarx->use_buf_b ? in pl011_dma_rx_irq()
926 &dmarx->dbuf_b : &dmarx->dbuf_a; in pl011_dma_rx_irq()
937 dev_err(uap->port.dev, "unable to pause DMA transfer\n"); in pl011_dma_rx_irq()
938 dmastat = rxchan->device->device_tx_status(rxchan, in pl011_dma_rx_irq()
939 dmarx->cookie, &state); in pl011_dma_rx_irq()
941 dev_err(uap->port.dev, "unable to pause DMA transfer\n"); in pl011_dma_rx_irq()
943 /* Disable RX DMA - incoming data will wait in the FIFO */ in pl011_dma_rx_irq()
944 uap->dmacr &= ~UART011_RXDMAE; in pl011_dma_rx_irq()
945 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_rx_irq()
946 uap->dmarx.running = false; in pl011_dma_rx_irq()
948 pending = dbuf->len - state.residue; in pl011_dma_rx_irq()
950 /* Then we terminate the transfer - we now know our residue */ in pl011_dma_rx_irq()
957 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true); in pl011_dma_rx_irq()
959 /* Switch buffer & re-trigger DMA job */ in pl011_dma_rx_irq()
960 dmarx->use_buf_b = !dmarx->use_buf_b; in pl011_dma_rx_irq()
962 dev_dbg(uap->port.dev, in pl011_dma_rx_irq()
963 "could not retrigger RX DMA job fall back to interrupt mode\n"); in pl011_dma_rx_irq()
964 uap->im |= UART011_RXIM; in pl011_dma_rx_irq()
965 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_rx_irq()
972 struct pl011_dmarx_data *dmarx = &uap->dmarx; in pl011_dma_rx_callback()
973 struct dma_chan *rxchan = dmarx->chan; in pl011_dma_rx_callback()
974 bool lastbuf = dmarx->use_buf_b; in pl011_dma_rx_callback()
975 struct pl011_dmabuf *dbuf = dmarx->use_buf_b ? in pl011_dma_rx_callback()
976 &dmarx->dbuf_b : &dmarx->dbuf_a; in pl011_dma_rx_callback()
983 * RX buffer is totally stuffed but no timeout has yet in pl011_dma_rx_callback()
985 * routine to flush out the secondary DMA buffer while in pl011_dma_rx_callback()
986 * we immediately trigger the next DMA job. in pl011_dma_rx_callback()
988 uart_port_lock_irq(&uap->port); in pl011_dma_rx_callback()
991 * the DMA irq handler. So we check the residue here. in pl011_dma_rx_callback()
993 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); in pl011_dma_rx_callback()
994 pending = dbuf->len - state.residue; in pl011_dma_rx_callback()
996 /* Then we terminate the transfer - we now know our residue */ in pl011_dma_rx_callback()
999 uap->dmarx.running = false; in pl011_dma_rx_callback()
1000 dmarx->use_buf_b = !lastbuf; in pl011_dma_rx_callback()
1004 uart_unlock_and_check_sysrq(&uap->port); in pl011_dma_rx_callback()
1006 * Do this check after we picked the DMA chars so we don't in pl011_dma_rx_callback()
1010 dev_dbg(uap->port.dev, in pl011_dma_rx_callback()
1011 "could not retrigger RX DMA job fall back to interrupt mode\n"); in pl011_dma_rx_callback()
1012 uap->im |= UART011_RXIM; in pl011_dma_rx_callback()
1013 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_rx_callback()
1024 if (!uap->using_rx_dma) in pl011_dma_rx_stop()
1027 /* FIXME. Just disable the DMA enable */ in pl011_dma_rx_stop()
1028 uap->dmacr &= ~UART011_RXDMAE; in pl011_dma_rx_stop()
1029 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_rx_stop()
1033 * Timer handler for Rx DMA polling.
1034 * Each poll checks the residue in the DMA buffer and transfers
1040 struct tty_port *port = &uap->port.state->port; in pl011_dma_rx_poll()
1041 struct pl011_dmarx_data *dmarx = &uap->dmarx; in pl011_dma_rx_poll()
1042 struct dma_chan *rxchan = uap->dmarx.chan; in pl011_dma_rx_poll()
1050 dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; in pl011_dma_rx_poll()
1051 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); in pl011_dma_rx_poll()
1052 if (likely(state.residue < dmarx->last_residue)) { in pl011_dma_rx_poll()
1053 dmataken = dbuf->len - dmarx->last_residue; in pl011_dma_rx_poll()
1054 size = dmarx->last_residue - state.residue; in pl011_dma_rx_poll()
1055 dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, in pl011_dma_rx_poll()
1058 dmarx->last_residue = state.residue; in pl011_dma_rx_poll()
1059 dmarx->last_jiffies = jiffies; in pl011_dma_rx_poll()
1065 * to interrupt mode. We will retrigger DMA at the first interrupt. in pl011_dma_rx_poll()
1067 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies) in pl011_dma_rx_poll()
1068 > uap->dmarx.poll_timeout) { in pl011_dma_rx_poll()
1069 uart_port_lock_irqsave(&uap->port, &flags); in pl011_dma_rx_poll()
1071 uap->im |= UART011_RXIM; in pl011_dma_rx_poll()
1072 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_rx_poll()
1073 uart_port_unlock_irqrestore(&uap->port, flags); in pl011_dma_rx_poll()
1075 uap->dmarx.running = false; in pl011_dma_rx_poll()
1077 del_timer(&uap->dmarx.timer); in pl011_dma_rx_poll()
1079 mod_timer(&uap->dmarx.timer, in pl011_dma_rx_poll()
1080 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); in pl011_dma_rx_poll()
1088 if (!uap->dma_probed) in pl011_dma_startup()
1091 if (!uap->dmatx.chan) in pl011_dma_startup()
1094 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA); in pl011_dma_startup()
1095 if (!uap->dmatx.buf) { in pl011_dma_startup()
1096 uap->port.fifosize = uap->fifosize; in pl011_dma_startup()
1100 uap->dmatx.len = PL011_DMA_BUFFER_SIZE; in pl011_dma_startup()
1102 /* The DMA buffer is now the FIFO the TTY subsystem can use */ in pl011_dma_startup()
1103 uap->port.fifosize = PL011_DMA_BUFFER_SIZE; in pl011_dma_startup()
1104 uap->using_tx_dma = true; in pl011_dma_startup()
1106 if (!uap->dmarx.chan) in pl011_dma_startup()
1109 /* Allocate and map DMA RX buffers */ in pl011_dma_startup()
1110 ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a, in pl011_dma_startup()
1113 dev_err(uap->port.dev, "failed to init DMA %s: %d\n", in pl011_dma_startup()
1118 ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b, in pl011_dma_startup()
1121 dev_err(uap->port.dev, "failed to init DMA %s: %d\n", in pl011_dma_startup()
1123 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, in pl011_dma_startup()
1128 uap->using_rx_dma = true; in pl011_dma_startup()
1131 /* Turn on DMA error (RX/TX will be enabled on demand) */ in pl011_dma_startup()
1132 uap->dmacr |= UART011_DMAONERR; in pl011_dma_startup()
1133 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_startup()
1136 * ST Micro variants have a specific DMA burst threshold in pl011_dma_startup()
1140 if (uap->vendor->dma_threshold) in pl011_dma_startup()
1144 if (uap->using_rx_dma) { in pl011_dma_startup()
1146 dev_dbg(uap->port.dev, in pl011_dma_startup()
1147 "could not trigger initial RX DMA job, fall back to interrupt mode\n"); in pl011_dma_startup()
1148 if (uap->dmarx.poll_rate) { in pl011_dma_startup()
1149 timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0); in pl011_dma_startup()
1150 mod_timer(&uap->dmarx.timer, in pl011_dma_startup()
1151 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); in pl011_dma_startup()
1152 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; in pl011_dma_startup()
1153 uap->dmarx.last_jiffies = jiffies; in pl011_dma_startup()
1160 if (!(uap->using_tx_dma || uap->using_rx_dma)) in pl011_dma_shutdown()
1163 /* Disable RX and TX DMA */ in pl011_dma_shutdown()
1164 while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy) in pl011_dma_shutdown()
1167 uart_port_lock_irq(&uap->port); in pl011_dma_shutdown()
1168 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE); in pl011_dma_shutdown()
1169 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_shutdown()
1170 uart_port_unlock_irq(&uap->port); in pl011_dma_shutdown()
1172 if (uap->using_tx_dma) { in pl011_dma_shutdown()
1174 dmaengine_terminate_all(uap->dmatx.chan); in pl011_dma_shutdown()
1175 if (uap->dmatx.queued) { in pl011_dma_shutdown()
1176 dma_unmap_single(uap->dmatx.chan->device->dev, in pl011_dma_shutdown()
1177 uap->dmatx.dma, uap->dmatx.len, in pl011_dma_shutdown()
1179 uap->dmatx.queued = false; in pl011_dma_shutdown()
1182 kfree(uap->dmatx.buf); in pl011_dma_shutdown()
1183 uap->using_tx_dma = false; in pl011_dma_shutdown()
1186 if (uap->using_rx_dma) { in pl011_dma_shutdown()
1187 dmaengine_terminate_all(uap->dmarx.chan); in pl011_dma_shutdown()
1188 /* Clean up the RX DMA */ in pl011_dma_shutdown()
1189 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE); in pl011_dma_shutdown()
1190 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE); in pl011_dma_shutdown()
1191 if (uap->dmarx.poll_rate) in pl011_dma_shutdown()
1192 del_timer_sync(&uap->dmarx.timer); in pl011_dma_shutdown()
1193 uap->using_rx_dma = false; in pl011_dma_shutdown()
1199 return uap->using_rx_dma; in pl011_dma_rx_available()
1204 return uap->using_rx_dma && uap->dmarx.running; in pl011_dma_rx_running()
1208 /* Blank functions if the DMA engine is not available */
1245 return -EIO; in pl011_dma_rx_trigger_dma()
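
When the DMA engine is compiled out, each hook collapses to a trivial inline stub; a representative pair, sketched to be consistent with the -EIO return shown:

	static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
	{
		return -EIO;		/* no DMA engine: RX stays in PIO mode */
	}

	static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
	{
		return false;
	}
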
1267 const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2; in pl011_rs485_tx_stop()
1268 struct uart_port *port = &uap->port; in pl011_rs485_tx_stop()
1275 dev_warn(port->dev, in pl011_rs485_tx_stop()
1280 udelay(uap->rs485_tx_drain_interval); in pl011_rs485_tx_stop()
1284 if (port->rs485.delay_rts_after_send) in pl011_rs485_tx_stop()
1285 mdelay(port->rs485.delay_rts_after_send); in pl011_rs485_tx_stop()
1289 if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) in pl011_rs485_tx_stop()
1299 uap->rs485_tx_started = false; in pl011_rs485_tx_stop()
1307 uap->im &= ~UART011_TXIM; in pl011_stop_tx()
1308 pl011_write(uap->im, uap, REG_IMSC); in pl011_stop_tx()
1311 if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) in pl011_stop_tx()
1317 /* Start TX with programmed I/O only (no DMA) */
1321 uap->im |= UART011_TXIM; in pl011_start_tx_pio()
1322 pl011_write(uap->im, uap, REG_IMSC); in pl011_start_tx_pio()
1328 struct uart_port *port = &uap->port; in pl011_rs485_tx_start()
1335 /* Disable receiver if half-duplex */ in pl011_rs485_tx_start()
1336 if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) in pl011_rs485_tx_start()
1339 if (port->rs485.flags & SER_RS485_RTS_ON_SEND) in pl011_rs485_tx_start()
1346 if (port->rs485.delay_rts_before_send) in pl011_rs485_tx_start()
1347 mdelay(port->rs485.delay_rts_before_send); in pl011_rs485_tx_start()
1349 uap->rs485_tx_started = true; in pl011_rs485_tx_start()
1357 if ((uap->port.rs485.flags & SER_RS485_ENABLED) && in pl011_start_tx()
1358 !uap->rs485_tx_started) in pl011_start_tx()
1370 uap->im &= ~(UART011_RXIM | UART011_RTIM | UART011_FEIM | in pl011_stop_rx()
1372 pl011_write(uap->im, uap, REG_IMSC); in pl011_stop_rx()
1391 uap->im |= UART011_RIMIM | UART011_CTSMIM | UART011_DCDMIM | UART011_DSRMIM; in pl011_enable_ms()
1392 pl011_write(uap->im, uap, REG_IMSC); in pl011_enable_ms()
1396 __releases(&uap->port.lock) in pl011_rx_chars()
1397 __acquires(&uap->port.lock) in pl011_rx_chars()
1401 uart_port_unlock(&uap->port); in pl011_rx_chars()
1402 tty_flip_buffer_push(&uap->port.state->port); in pl011_rx_chars()
1404 * If we were temporarily out of DMA mode for a while, in pl011_rx_chars()
1405 * attempt to switch back to DMA mode again. in pl011_rx_chars()
1409 dev_dbg(uap->port.dev, in pl011_rx_chars()
1410 "could not trigger RX DMA job fall back to interrupt mode again\n"); in pl011_rx_chars()
1411 uap->im |= UART011_RXIM; in pl011_rx_chars()
1412 pl011_write(uap->im, uap, REG_IMSC); in pl011_rx_chars()
1415 /* Start Rx DMA poll */ in pl011_rx_chars()
1416 if (uap->dmarx.poll_rate) { in pl011_rx_chars()
1417 uap->dmarx.last_jiffies = jiffies; in pl011_rx_chars()
1418 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; in pl011_rx_chars()
1419 mod_timer(&uap->dmarx.timer, in pl011_rx_chars()
1420 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); in pl011_rx_chars()
1425 uart_port_lock(&uap->port); in pl011_rx_chars()
1436 uap->port.icount.tx++; in pl011_tx_char()
1444 struct tty_port *tport = &uap->port.state->port; in pl011_tx_chars()
1445 int count = uap->fifosize >> 1; in pl011_tx_chars()
1447 if (uap->port.x_char) { in pl011_tx_chars()
1448 if (!pl011_tx_char(uap, uap->port.x_char, from_irq)) in pl011_tx_chars()
1450 uap->port.x_char = 0; in pl011_tx_chars()
1451 --count; in pl011_tx_chars()
1453 if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(&uap->port)) { in pl011_tx_chars()
1454 pl011_stop_tx(&uap->port); in pl011_tx_chars()
1458 /* If we are using DMA mode, try to send some characters. */ in pl011_tx_chars()
1465 if (likely(from_irq) && count-- == 0) in pl011_tx_chars()
1468 if (!kfifo_peek(&tport->xmit_fifo, &c)) in pl011_tx_chars()
1474 kfifo_skip(&tport->xmit_fifo); in pl011_tx_chars()
1477 if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) in pl011_tx_chars()
1478 uart_write_wakeup(&uap->port); in pl011_tx_chars()
1480 if (kfifo_is_empty(&tport->xmit_fifo)) { in pl011_tx_chars()
1481 pl011_stop_tx(&uap->port); in pl011_tx_chars()
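
The count budget taken at the top of pl011_tx_chars() (uap->fifosize >> 1) encodes a FIFO invariant worth spelling out; a reasoning sketch assuming the default half-full transmit trigger level:

	/*
	 * The TX interrupt fires when the FIFO has drained to its trigger
	 * level, half full by default, so at least fifosize/2 slots are
	 * known to be free.  Writing at most uap->fifosize >> 1 characters
	 * per interrupt (8 on a 16-byte FIFO) can therefore never overrun
	 * the FIFO, which lets the from_irq path skip polling TXFF on
	 * every character.
	 */
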
1493 delta = status ^ uap->old_status; in pl011_modem_status()
1494 uap->old_status = status; in pl011_modem_status()
1500 uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD); in pl011_modem_status()
1502 if (delta & uap->vendor->fr_dsr) in pl011_modem_status()
1503 uap->port.icount.dsr++; in pl011_modem_status()
1505 if (delta & uap->vendor->fr_cts) in pl011_modem_status()
1506 uart_handle_cts_change(&uap->port, in pl011_modem_status()
1507 status & uap->vendor->fr_cts); in pl011_modem_status()
1509 wake_up_interruptible(&uap->port.state->port.delta_msr_wait); in pl011_modem_status()
1514 if (!uap->vendor->cts_event_workaround) in check_apply_cts_event_workaround()
1535 uart_port_lock(&uap->port); in pl011_int()
1536 status = pl011_read(uap, REG_RIS) & uap->im; in pl011_int()
1556 if (pass_counter-- == 0) in pl011_int()
1559 status = pl011_read(uap, REG_RIS) & uap->im; in pl011_int()
1564 uart_unlock_and_check_sysrq(&uap->port); in pl011_int()
1575 unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr; in pl011_tx_empty()
1577 return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ? in pl011_tx_empty()
1595 pl011_maybe_set_bit(status & uap->vendor->fr_dsr, &result, TIOCM_DSR); in pl011_get_mctrl()
1596 pl011_maybe_set_bit(status & uap->vendor->fr_cts, &result, TIOCM_CTS); in pl011_get_mctrl()
1597 pl011_maybe_set_bit(status & uap->vendor->fr_ri, &result, TIOCM_RNG); in pl011_get_mctrl()
1624 if (port->status & UPSTAT_AUTORTS) { in pl011_set_mctrl()
1625 /* We need to disable auto-RTS if we want to turn RTS off */ in pl011_set_mctrl()
1639 uart_port_lock_irqsave(&uap->port, &flags); in pl011_break_ctl()
1641 if (break_state == -1) in pl011_break_ctl()
1646 uart_port_unlock_irqrestore(&uap->port, flags); in pl011_break_ctl()
1713 pinctrl_pm_select_default_state(port->dev); in pl011_hwinit()
1718 retval = clk_prepare_enable(uap->clk); in pl011_hwinit()
1722 uap->port.uartclk = clk_get_rate(uap->clk); in pl011_hwinit()
1733 uap->im = pl011_read(uap, REG_IMSC); in pl011_hwinit()
1736 if (dev_get_platdata(uap->port.dev)) { in pl011_hwinit()
1739 plat = dev_get_platdata(uap->port.dev); in pl011_hwinit()
1740 if (plat->init) in pl011_hwinit()
1741 plat->init(); in pl011_hwinit()
1769 pl011_write(uap->im, uap, REG_IMSC); in pl011_allocate_irq()
1771 return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap); in pl011_allocate_irq()
1775 * Enable interrupts; only timeouts when using DMA.
1776 * If the initial RX DMA job failed, start in interrupt mode.
1784 uart_port_lock_irqsave(&uap->port, &flags); in pl011_enable_interrupts()
1795 for (i = 0; i < uap->fifosize * 2; ++i) { in pl011_enable_interrupts()
1802 uap->im = UART011_RTIM; in pl011_enable_interrupts()
1804 uap->im |= UART011_RXIM; in pl011_enable_interrupts()
1805 pl011_write(uap->im, uap, REG_IMSC); in pl011_enable_interrupts()
1806 uart_port_unlock_irqrestore(&uap->port, flags); in pl011_enable_interrupts()
1814 uart_port_lock_irqsave(&uap->port, &flags); in pl011_unthrottle_rx()
1816 uap->im = UART011_RTIM; in pl011_unthrottle_rx()
1818 uap->im |= UART011_RXIM; in pl011_unthrottle_rx()
1820 pl011_write(uap->im, uap, REG_IMSC); in pl011_unthrottle_rx()
1823 if (uap->using_rx_dma) { in pl011_unthrottle_rx()
1824 uap->dmacr |= UART011_RXDMAE; in pl011_unthrottle_rx()
1825 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_unthrottle_rx()
1829 uart_port_unlock_irqrestore(&uap->port, flags); in pl011_unthrottle_rx()
1847 pl011_write(uap->vendor->ifls, uap, REG_IFLS); in pl011_startup()
1849 uart_port_lock_irq(&uap->port); in pl011_startup()
1855 if (!(port->rs485.flags & SER_RS485_ENABLED)) in pl011_startup()
1860 uart_port_unlock_irq(&uap->port); in pl011_startup()
1865 uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY; in pl011_startup()
1867 /* Startup DMA */ in pl011_startup()
1875 clk_disable_unprepare(uap->clk); in pl011_startup()
1894 uap->old_status = 0; in sbsa_uart_startup()
1919 uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); in pl011_disable_uart()
1920 uart_port_lock_irq(&uap->port); in pl011_disable_uart()
1925 uart_port_unlock_irq(&uap->port); in pl011_disable_uart()
1937 uart_port_lock_irq(&uap->port); in pl011_disable_interrupts()
1940 uap->im = 0; in pl011_disable_interrupts()
1941 pl011_write(uap->im, uap, REG_IMSC); in pl011_disable_interrupts()
1944 uart_port_unlock_irq(&uap->port); in pl011_disable_interrupts()
1956 if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) in pl011_shutdown()
1959 free_irq(uap->port.irq, uap); in pl011_shutdown()
1966 clk_disable_unprepare(uap->clk); in pl011_shutdown()
1968 pinctrl_pm_select_sleep_state(port->dev); in pl011_shutdown()
1970 if (dev_get_platdata(uap->port.dev)) { in pl011_shutdown()
1973 plat = dev_get_platdata(uap->port.dev); in pl011_shutdown()
1974 if (plat->exit) in pl011_shutdown()
1975 plat->exit(); in pl011_shutdown()
1978 if (uap->port.ops->flush_buffer) in pl011_shutdown()
1979 uap->port.ops->flush_buffer(port); in pl011_shutdown()
1989 free_irq(uap->port.irq, uap); in sbsa_uart_shutdown()
1991 if (uap->port.ops->flush_buffer) in sbsa_uart_shutdown()
1992 uap->port.ops->flush_buffer(port); in sbsa_uart_shutdown()
1998 port->read_status_mask = UART011_DR_OE | 255; in pl011_setup_status_masks()
1999 if (termios->c_iflag & INPCK) in pl011_setup_status_masks()
2000 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE; in pl011_setup_status_masks()
2001 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) in pl011_setup_status_masks()
2002 port->read_status_mask |= UART011_DR_BE; in pl011_setup_status_masks()
2007 port->ignore_status_mask = 0; in pl011_setup_status_masks()
2008 if (termios->c_iflag & IGNPAR) in pl011_setup_status_masks()
2009 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE; in pl011_setup_status_masks()
2010 if (termios->c_iflag & IGNBRK) { in pl011_setup_status_masks()
2011 port->ignore_status_mask |= UART011_DR_BE; in pl011_setup_status_masks()
2016 if (termios->c_iflag & IGNPAR) in pl011_setup_status_masks()
2017 port->ignore_status_mask |= UART011_DR_OE; in pl011_setup_status_masks()
2023 if ((termios->c_cflag & CREAD) == 0) in pl011_setup_status_masks()
2024 port->ignore_status_mask |= UART_DUMMY_DR_RX; in pl011_setup_status_masks()
2038 if (uap->vendor->oversampling) in pl011_set_termios()
2047 port->uartclk / clkdiv); in pl011_set_termios()
2050 * Adjust RX DMA polling rate with baud rate if not specified. in pl011_set_termios()
2052 if (uap->dmarx.auto_poll_rate) in pl011_set_termios()
2053 uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud); in pl011_set_termios()
2056 if (baud > port->uartclk / 16) in pl011_set_termios()
2057 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); in pl011_set_termios()
2059 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); in pl011_set_termios()
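
To make the fixed-point quotient concrete, a worked example with illustrative numbers (the IBRD/FBRD split is performed later in this function and does not appear among the matches):

	/*
	 * uartclk = 24 MHz, baud = 115200, standard 16x oversampling:
	 *   quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200) = 833
	 * The PL011 takes the divisor as a 16-bit integer part plus a
	 * 6-bit fraction, so the driver later writes
	 *   IBRD = quot >> 6   = 13
	 *   FBRD = quot & 0x3f = 1
	 * i.e. 13 + 1/64 ~= 13.016, against the ideal
	 * 24000000 / (16 * 115200) ~= 13.021.
	 */
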
2061 switch (termios->c_cflag & CSIZE) { in pl011_set_termios()
2075 if (termios->c_cflag & CSTOPB) in pl011_set_termios()
2077 if (termios->c_cflag & PARENB) { in pl011_set_termios()
2079 if (!(termios->c_cflag & PARODD)) in pl011_set_termios()
2081 if (termios->c_cflag & CMSPAR) in pl011_set_termios()
2084 if (uap->fifosize > 1) in pl011_set_termios()
2087 bits = tty_get_frame_size(termios->c_cflag); in pl011_set_termios()
2092 * Update the per-port timeout. in pl011_set_termios()
2094 uart_update_timeout(port, termios->c_cflag, baud); in pl011_set_termios()
2101 uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud); in pl011_set_termios()
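
The drain interval computed here is one character frame time in microseconds, rounded up; a worked example with assumed line settings:

	/*
	 * 8N1 gives bits = 10 (start + 8 data + stop); at 115200 baud:
	 *   DIV_ROUND_UP(10 * 1000 * 1000, 115200) = 87 us per character,
	 * the step at which pl011_rs485_tx_stop() polls (via udelay())
	 * while waiting for the transmitter to drain.
	 */
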
2105 if (UART_ENABLE_MS(port, termios->c_cflag)) in pl011_set_termios()
2108 if (port->rs485.flags & SER_RS485_ENABLED) in pl011_set_termios()
2109 termios->c_cflag &= ~CRTSCTS; in pl011_set_termios()
2113 if (termios->c_cflag & CRTSCTS) { in pl011_set_termios()
2118 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; in pl011_set_termios()
2121 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); in pl011_set_termios()
2124 if (uap->vendor->oversampling) { in pl011_set_termios()
2125 if (baud > port->uartclk / 16) in pl011_set_termios()
2137 if (uap->vendor->oversampling) { in pl011_set_termios()
2139 quot -= 1; in pl011_set_termios()
2141 quot -= 2; in pl011_set_termios()
2174 tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud); in sbsa_uart_set_termios()
2177 termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD); in sbsa_uart_set_termios()
2178 termios->c_cflag &= ~(CMSPAR | CRTSCTS); in sbsa_uart_set_termios()
2179 termios->c_cflag |= CS8 | CLOCAL; in sbsa_uart_set_termios()
2182 uart_update_timeout(port, CS8, uap->fixed_baud); in sbsa_uart_set_termios()
2191 return uap->port.type == PORT_AMBA ? uap->type : NULL; in pl011_type()
2200 port->type = PORT_AMBA; in pl011_config_port()
2210 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA) in pl011_verify_port()
2211 ret = -EINVAL; in pl011_verify_port()
2212 if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs()) in pl011_verify_port()
2213 ret = -EINVAL; in pl011_verify_port()
2214 if (ser->baud_base < 9600) in pl011_verify_port()
2215 ret = -EINVAL; in pl011_verify_port()
2216 if (port->mapbase != (unsigned long)ser->iomem_base) in pl011_verify_port()
2217 ret = -EINVAL; in pl011_verify_port()
2227 if (port->rs485.flags & SER_RS485_ENABLED) in pl011_rs485_config()
2231 if (rs485->flags & SER_RS485_ENABLED) { in pl011_rs485_config()
2236 port->status &= ~UPSTAT_AUTORTS; in pl011_rs485_config()
2313 struct uart_amba_port *uap = amba_ports[co->index]; in pl011_console_write()
2318 clk_enable(uap->clk); in pl011_console_write()
2321 locked = uart_port_trylock_irqsave(&uap->port, &flags); in pl011_console_write()
2323 uart_port_lock_irqsave(&uap->port, &flags); in pl011_console_write()
2328 if (!uap->vendor->always_enabled) { in pl011_console_write()
2335 uart_console_write(&uap->port, s, count, pl011_console_putchar); in pl011_console_write()
2342 while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) in pl011_console_write()
2343 & uap->vendor->fr_busy) in pl011_console_write()
2345 if (!uap->vendor->always_enabled) in pl011_console_write()
2349 uart_port_unlock_irqrestore(&uap->port, flags); in pl011_console_write()
2351 clk_disable(uap->clk); in pl011_console_write()
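
The trylock near the top of pl011_console_write() is the standard console re-entrancy pattern; assembled, the elided branches plausibly read:

	if (uap->port.sysrq)
		locked = 0;	/* sysrq handler already holds the lock */
	else if (oops_in_progress)
		locked = uart_port_trylock_irqsave(&uap->port, &flags);
	else
		uart_port_lock_irqsave(&uap->port, &flags);
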
2380 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd); in pl011_console_get_options()
2382 if (uap->vendor->oversampling && in pl011_console_get_options()
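
Running the earlier example divisor backwards shows the round trip (same illustrative numbers):

	/*
	 * ibrd = 13, fbrd = 1, uartclk = 24 MHz:
	 *   baud = 24000000 * 4 / (64 * 13 + 1) = 96000000 / 833 ~= 115246
	 * which is close enough for the serial core to snap back to the
	 * standard 115200 when encoding the termios baud rate.
	 */
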
2397 * Check whether an invalid uart number has been specified, and in pl011_console_setup()
2401 if (co->index >= UART_NR) in pl011_console_setup()
2402 co->index = 0; in pl011_console_setup()
2403 uap = amba_ports[co->index]; in pl011_console_setup()
2405 return -ENODEV; in pl011_console_setup()
2408 pinctrl_pm_select_default_state(uap->port.dev); in pl011_console_setup()
2410 ret = clk_prepare(uap->clk); in pl011_console_setup()
2414 if (dev_get_platdata(uap->port.dev)) { in pl011_console_setup()
2417 plat = dev_get_platdata(uap->port.dev); in pl011_console_setup()
2418 if (plat->init) in pl011_console_setup()
2419 plat->init(); in pl011_console_setup()
2422 uap->port.uartclk = clk_get_rate(uap->clk); in pl011_console_setup()
2424 if (uap->vendor->fixed_options) { in pl011_console_setup()
2425 baud = uap->fixed_baud; in pl011_console_setup()
2434 return uart_set_options(&uap->port, co, baud, parity, bits, flow); in pl011_console_setup()
2438 * pl011_console_match - non-standard console matching
2453 * Returns 0 if console matches; otherwise non-zero to use default matching
2469 return -ENODEV; in pl011_console_match()
2472 return -ENODEV; in pl011_console_match()
2475 return -ENODEV; in pl011_console_match()
2484 port = &amba_ports[i]->port; in pl011_console_match()
2486 if (port->mapbase != addr) in pl011_console_match()
2489 co->index = i; in pl011_console_match()
2494 return -ENODEV; in pl011_console_match()
2505 .index = -1,
2513 while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) in qdf2400_e44_putc()
2515 writel(c, port->membase + UART01x_DR); in qdf2400_e44_putc()
2516 while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE)) in qdf2400_e44_putc()
2522 struct earlycon_device *dev = con->data; in qdf2400_e44_early_write()
2524 uart_console_write(&dev->port, s, n, qdf2400_e44_putc); in qdf2400_e44_early_write()
2529 while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) in pl011_putc()
2531 if (port->iotype == UPIO_MEM32) in pl011_putc()
2532 writel(c, port->membase + UART01x_DR); in pl011_putc()
2534 writeb(c, port->membase + UART01x_DR); in pl011_putc()
2535 while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY) in pl011_putc()
2541 struct earlycon_device *dev = con->data; in pl011_early_write()
2543 uart_console_write(&dev->port, s, n, pl011_putc); in pl011_early_write()
2549 if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE) in pl011_getc()
2552 if (port->iotype == UPIO_MEM32) in pl011_getc()
2553 return readl(port->membase + UART01x_DR); in pl011_getc()
2555 return readb(port->membase + UART01x_DR); in pl011_getc()
2560 struct earlycon_device *dev = con->data; in pl011_early_read()
2564 ch = pl011_getc(&dev->port); in pl011_early_read()
2578 * On non-ACPI systems, earlycon is enabled by specifying
2592 if (!device->port.membase) in pl011_early_console_setup()
2593 return -ENODEV; in pl011_early_console_setup()
2595 device->con->write = pl011_early_write; in pl011_early_console_setup()
2596 device->con->read = pl011_early_read; in pl011_early_console_setup()
2603 OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
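
Per the OF_EARLYCON_DECLARE() hooks above, the early console is requested on the kernel command line; typical forms (the MMIO address is board specific, 0x9000000 here is an invented example value):

	earlycon=pl011,0x9000000
	earlycon=pl011,mmio32,0x9000000

The mmio32 form corresponds to the UPIO_MEM32 handling visible in pl011_putc() below.
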
2612 * case, the SPCR code will detect the need for the E44 work-around,
2619 if (!device->port.membase) in qdf2400_e44_early_console_setup()
2620 return -ENODEV; in qdf2400_e44_early_console_setup()
2622 device->con->write = qdf2400_e44_early_write; in qdf2400_e44_early_console_setup()
2652 np = dev->of_node; in pl011_probe_dt_alias()
2669 …dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeratio… in pl011_probe_dt_alias()
2699 return -EBUSY; in pl011_find_free_port()
2714 uap->port.dev = dev; in pl011_setup_port()
2715 uap->port.mapbase = mmiobase->start; in pl011_setup_port()
2716 uap->port.membase = base; in pl011_setup_port()
2717 uap->port.fifosize = uap->fifosize; in pl011_setup_port()
2718 uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE); in pl011_setup_port()
2719 uap->port.flags = UPF_BOOT_AUTOCONF; in pl011_setup_port()
2720 uap->port.line = index; in pl011_setup_port()
2722 ret = uart_get_rs485_mode(&uap->port); in pl011_setup_port()
2742 dev_err(uap->port.dev, in pl011_register_port()
2743 "Failed to register AMBA-PL011 driver\n"); in pl011_register_port()
2751 ret = uart_add_one_port(&amba_reg, &uap->port); in pl011_register_port()
2768 struct vendor_data *vendor = id->data; in pl011_probe()
2776 uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port), in pl011_probe()
2779 return -ENOMEM; in pl011_probe()
2781 uap->clk = devm_clk_get(&dev->dev, NULL); in pl011_probe()
2782 if (IS_ERR(uap->clk)) in pl011_probe()
2783 return PTR_ERR(uap->clk); in pl011_probe()
2785 uap->reg_offset = vendor->reg_offset; in pl011_probe()
2786 uap->vendor = vendor; in pl011_probe()
2787 uap->fifosize = vendor->get_fifosize(dev); in pl011_probe()
2788 uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; in pl011_probe()
2789 uap->port.irq = dev->irq[0]; in pl011_probe()
2790 uap->port.ops = &amba_pl011_pops; in pl011_probe()
2791 uap->port.rs485_config = pl011_rs485_config; in pl011_probe()
2792 uap->port.rs485_supported = pl011_rs485_supported; in pl011_probe()
2793 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev)); in pl011_probe()
2795 if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) { in pl011_probe()
2798 uap->port.iotype = UPIO_MEM; in pl011_probe()
2801 uap->port.iotype = UPIO_MEM32; in pl011_probe()
2804 dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n", in pl011_probe()
2806 return -EINVAL; in pl011_probe()
2810 ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr); in pl011_probe()
2823 uart_remove_one_port(&amba_reg, &uap->port); in pl011_remove()
2833 return -EINVAL; in pl011_suspend()
2835 return uart_suspend_port(&amba_reg, &uap->port); in pl011_suspend()
2843 return -EINVAL; in pl011_resume()
2845 return uart_resume_port(&amba_reg, &uap->port); in pl011_resume()
2859 uap->vendor = &vendor_qdt_qdf2400_e44; in qpdf2400_erratum44_workaround()
2878 if (pdev->dev.of_node) { in sbsa_uart_probe()
2879 struct device_node *np = pdev->dev.of_node; in sbsa_uart_probe()
2881 ret = of_property_read_u32(np, "current-speed", &baudrate); in sbsa_uart_probe()
2892 uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port), in sbsa_uart_probe()
2895 return -ENOMEM; in sbsa_uart_probe()
2900 uap->port.irq = ret; in sbsa_uart_probe()
2902 uap->vendor = &vendor_sbsa; in sbsa_uart_probe()
2903 qpdf2400_erratum44_workaround(&pdev->dev, uap); in sbsa_uart_probe()
2905 uap->reg_offset = uap->vendor->reg_offset; in sbsa_uart_probe()
2906 uap->fifosize = 32; in sbsa_uart_probe()
2907 uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; in sbsa_uart_probe()
2908 uap->port.ops = &sbsa_uart_pops; in sbsa_uart_probe()
2909 uap->fixed_baud = baudrate; in sbsa_uart_probe()
2911 snprintf(uap->type, sizeof(uap->type), "SBSA"); in sbsa_uart_probe()
2915 ret = pl011_setup_port(&pdev->dev, uap, r, portnr); in sbsa_uart_probe()
2928 uart_remove_one_port(&amba_reg, &uap->port); in sbsa_uart_remove()
2933 { .compatible = "arm,sbsa-uart", },
2949 .name = "sbsa-uart",
2975 .name = "uart-pl011",
3000 * While this can be a module, if builtin it's most likely the console