spi-geni-qcom.c: 5c68005083d620b1499fc81926a514d39ae8b88c (old) → 3a76c7ca9e77269dd10cf21465a055274cfa40c6 (new); unified view, "-" = removed, "+" = added
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/interrupt.h>

--- 83 unchanged lines hidden ---

	unsigned int oversampling;
	spinlock_t lock;
	int irq;
	bool cs_flag;
	bool abort_failed;
	struct dma_chan *tx;
	struct dma_chan *rx;
	int cur_xfer_mode;
-	dma_addr_t tx_se_dma;
-	dma_addr_t rx_se_dma;
};
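
Note on the struct change: the per-transfer dma_addr_t bookkeeping disappears because the driver no longer maps buffers itself. With this diff, the SPI core DMA-maps any transfer for which geni_can_dma() returns true and hands the driver ready-made sg tables in xfer->tx_sg and xfer->rx_sg, mapped against spi->dma_map_dev (set near the end of this diff). A simplified sketch of that core-side step, approximated from drivers/spi/spi.c rather than quoted from it; spi_map_buf()'s exact signature and the tx_dev/rx_dev mapping devices are assumptions here:

	/* Sketch only: what the core does before calling transfer_one(). */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;	/* small transfer: driver will use the FIFO */
		if (xfer->tx_buf)	/* builds and maps xfer->tx_sg */
			spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
				    (void *)xfer->tx_buf, xfer->len, DMA_TO_DEVICE);
		if (xfer->rx_buf)	/* builds and maps xfer->rx_sg */
			spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
				    xfer->rx_buf, xfer->len, DMA_FROM_DEVICE);
	}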

static int get_spi_clk_cfg(unsigned int speed_hz,
			   struct spi_geni_master *mas,
			   unsigned int *clk_idx,
			   unsigned int *clk_div)
{
	unsigned long sclk_freq;

--- 59 unchanged lines hidden ---

		 * access this from an interrupt.
		 */
		mas->abort_failed = true;
	}

unmap_if_dma:
	if (mas->cur_xfer_mode == GENI_SE_DMA) {
		if (xfer) {
-			if (xfer->tx_buf && mas->tx_se_dma) {
+			if (xfer->tx_buf) {
				spin_lock_irq(&mas->lock);
				reinit_completion(&mas->tx_reset_done);
				writel(1, se->base + SE_DMA_TX_FSM_RST);
				spin_unlock_irq(&mas->lock);
				time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
				if (!time_left)
					dev_err(mas->dev, "DMA TX RESET failed\n");
-				geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
			}
-			if (xfer->rx_buf && mas->rx_se_dma) {
+			if (xfer->rx_buf) {
				spin_lock_irq(&mas->lock);
				reinit_completion(&mas->rx_reset_done);
				writel(1, se->base + SE_DMA_RX_FSM_RST);
				spin_unlock_irq(&mas->lock);
				time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
				if (!time_left)
					dev_err(mas->dev, "DMA RX RESET failed\n");
-				geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
			}
		} else {
			/*
			 * This can happen if a timeout happened and we had to wait
			 * for the lock in this function because the ISR was holding
			 * the lock and handling transfer completion at that time.
			 */
			dev_warn(mas->dev, "Cancel/Abort on completed SPI transfer\n");

--- 314 unchanged lines hidden ---

	if (peripheral.cmd & SPI_RX)
		dma_async_issue_pending(mas->rx);

	dma_async_issue_pending(mas->tx);
	return 1;
}

+static u32 get_xfer_len_in_words(struct spi_transfer *xfer,
+				 struct spi_geni_master *mas)
+{
+	u32 len;
+
+	if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
+		len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
+	else
+		len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
+	len &= TRANS_LEN_MSK;
+
+	return len;
+}

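Note on get_xfer_len_in_words(): it converts the transfer's byte count into SE word units, where a word whose bit width is not a multiple of MIN_WORD_LEN still occupies whole bytes in memory. A standalone illustration of the arithmetic; the constant values (MIN_WORD_LEN = 4, BITS_PER_BYTE = 8, 24-bit TRANS_LEN_MSK) are assumed to match the driver's definitions:

	#include <stdio.h>

	#define MIN_WORD_LEN	4		/* assumed: driver's minimum word size */
	#define BITS_PER_BYTE	8
	#define TRANS_LEN_MSK	0xffffff	/* assumed: 24-bit transfer length */

	/* Same math as get_xfer_len_in_words(), lifted out of the driver. */
	static unsigned int xfer_len_in_words(unsigned int len_bytes,
					      unsigned int bits_per_word)
	{
		unsigned int len;

		if (!(bits_per_word % MIN_WORD_LEN))
			len = len_bytes * BITS_PER_BYTE / bits_per_word;
		else
			/* odd widths occupy whole bytes per word */
			len = len_bytes / (bits_per_word / BITS_PER_BYTE + 1);
		return len & TRANS_LEN_MSK;
	}

	int main(void)
	{
		printf("%u\n", xfer_len_in_words(64, 8));	/* 64 words */
		printf("%u\n", xfer_len_in_words(64, 16));	/* 32 words */
		printf("%u\n", xfer_len_in_words(64, 14));	/* 32: a 14-bit word takes 2 bytes */
		return 0;
	}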
static bool geni_can_dma(struct spi_controller *ctlr,
			 struct spi_device *slv, struct spi_transfer *xfer)
{
	struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
+	u32 len, fifo_size;

-	/*
-	 * Return true if transfer needs to be mapped prior to
-	 * calling transfer_one which is the case only for GPI_DMA.
-	 * For SE_DMA mode, map/unmap is done in geni_se_*x_dma_prep.
-	 */
-	return mas->cur_xfer_mode == GENI_GPI_DMA;
+	if (mas->cur_xfer_mode == GENI_GPI_DMA)
+		return true;
+
+	len = get_xfer_len_in_words(xfer, mas);
+	fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
+
+	if (len > fifo_size)
+		return true;
+	else
+		return false;
}
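
Note on geni_can_dma(): it now doubles as the FIFO/DMA policy. Returning true makes the core map the buffers, and the driver then runs the transfer in SE DMA mode; returning false leaves the sg tables empty, so the transfer stays in programmed I/O through the FIFO. Worked numbers with hypothetical hardware parameters (the driver reads the real ones from the SE's hardware-parameter registers at probe):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical values, for illustration only. */
		unsigned int tx_fifo_depth = 16;	/* FIFO entries */
		unsigned int fifo_width_bits = 32;	/* bits per entry */
		unsigned int bits_per_word = 8;

		/* Words the FIFO can hold: 16 * 32 / 8 = 64. */
		unsigned int fifo_size = tx_fifo_depth * fifo_width_bits / bits_per_word;

		printf("fifo holds %u words\n", fifo_size);
		printf("100-word xfer -> %s\n", 100 > fifo_size ? "DMA" : "FIFO");
		printf("32-word xfer  -> %s\n", 32 > fifo_size ? "DMA" : "FIFO");
		return 0;
	}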

static int spi_geni_prepare_message(struct spi_master *spi,
				    struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int ret;

--- 222 unchanged lines hidden ---

	mas->rx_rem_bytes -= rx_bytes;
}

static int setup_se_xfer(struct spi_transfer *xfer,
			 struct spi_geni_master *mas,
			 u16 mode, struct spi_master *spi)
{
	u32 m_cmd = 0;
-	u32 len, fifo_size;
+	u32 len;
	struct geni_se *se = &mas->se;
	int ret;

	/*
	 * Ensure that our interrupt handler isn't still running from some
	 * prior command before we start messing with the hardware behind
	 * its back. We don't need to _keep_ the lock here since we're only
	 * worried about racing with our interrupt handler. The SPI core

--- 15 unchanged lines hidden ---

	/* Speed and bits per word can be overridden per transfer */
	ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
	if (ret)
		return ret;

	mas->tx_rem_bytes = 0;
	mas->rx_rem_bytes = 0;

-	if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
-		len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
-	else
-		len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
-	len &= TRANS_LEN_MSK;
+	len = get_xfer_len_in_words(xfer, mas);

	mas->cur_xfer = xfer;
	if (xfer->tx_buf) {
		m_cmd |= SPI_TX_ONLY;
		mas->tx_rem_bytes = xfer->len;
		writel(len, se->base + SE_SPI_TX_TRANS_LEN);
	}

	if (xfer->rx_buf) {
		m_cmd |= SPI_RX_ONLY;
		writel(len, se->base + SE_SPI_RX_TRANS_LEN);
		mas->rx_rem_bytes = xfer->len;
	}

-	/* Select transfer mode based on transfer length */
-	fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
-	mas->cur_xfer_mode = (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;
+	/*
+	 * Select DMA mode if sg tables are present, and only with one entry.
+	 * This is not a serious limitation because the xfer buffers are
+	 * expected to fit into one entry almost always, and if any
+	 * doesn't for any reason we fall back to FIFO mode anyway.
+	 */
+	if (!xfer->tx_sg.nents && !xfer->rx_sg.nents)
+		mas->cur_xfer_mode = GENI_SE_FIFO;
+	else if (xfer->tx_sg.nents > 1 || xfer->rx_sg.nents > 1) {
+		dev_warn_once(mas->dev, "Doing FIFO, cannot handle tx_nents-%d, rx_nents-%d\n",
+			      xfer->tx_sg.nents, xfer->rx_sg.nents);
+		mas->cur_xfer_mode = GENI_SE_FIFO;
+	} else
+		mas->cur_xfer_mode = GENI_SE_DMA;
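
Note on the nents checks: an empty pair of sg tables means can_dma() declined and the transfer fits the FIFO, while a table with more than one entry cannot be programmed into the single-segment SE DMA registers, so the driver falls back to FIFO mode. The same decision as a standalone sketch, with a local enum standing in for the geni_se modes:

	#include <stdio.h>

	enum se_mode { SE_FIFO, SE_DMA };	/* stand-ins for GENI_SE_FIFO/GENI_SE_DMA */

	static enum se_mode pick_mode(unsigned int tx_nents, unsigned int rx_nents)
	{
		if (!tx_nents && !rx_nents)
			return SE_FIFO;		/* core mapped nothing: short transfer */
		if (tx_nents > 1 || rx_nents > 1)
			return SE_FIFO;		/* SE DMA takes one segment per direction */
		return SE_DMA;			/* exactly one mapped segment */
	}

	int main(void)
	{
		printf("%d %d %d\n", pick_mode(0, 0), pick_mode(1, 1), pick_mode(2, 1));
		/* prints: 0 1 0  (FIFO, DMA, FIFO) */
		return 0;
	}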
	geni_se_select_mode(se, mas->cur_xfer_mode);

	/*
	 * Lock right before we start the transfer since our
	 * interrupt could come in at any time now.
	 */
	spin_lock_irq(&mas->lock);
	geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);

	if (mas->cur_xfer_mode == GENI_SE_DMA) {
-		if (m_cmd & SPI_RX_ONLY) {
-			ret = geni_se_rx_dma_prep(se, xfer->rx_buf,
-						  xfer->len, &mas->rx_se_dma);
-			if (ret) {
-				dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
-				mas->rx_se_dma = 0;
-				goto unlock_and_return;
-			}
-		}
-		if (m_cmd & SPI_TX_ONLY) {
-			ret = geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
-						  xfer->len, &mas->tx_se_dma);
-			if (ret) {
-				dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
-				mas->tx_se_dma = 0;
-				if (m_cmd & SPI_RX_ONLY) {
-					/* Unmap rx buffer if duplex transfer */
-					geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
-					mas->rx_se_dma = 0;
-				}
-				goto unlock_and_return;
-			}
-		}
+		if (m_cmd & SPI_RX_ONLY)
+			geni_se_rx_init_dma(se, sg_dma_address(xfer->rx_sg.sgl),
+					    sg_dma_len(xfer->rx_sg.sgl));
+		if (m_cmd & SPI_TX_ONLY)
+			geni_se_tx_init_dma(se, sg_dma_address(xfer->tx_sg.sgl),
+					    sg_dma_len(xfer->tx_sg.sgl));
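
Note on the removed error paths: geni_se_*x_dma_prep() performed the DMA mapping and could fail, so every call needed unwinding. geni_se_*x_init_dma() only programs an already-mapped address and length into the engine, so nothing is left to fail here and ret keeps the value the earlier setup calls left it with. A hedged sketch of roughly what geni_se_rx_init_dma() boils down to, based on the companion geni-se change; the register names are assumptions, not quoted from it:

	/* Assumed register layout; see the geni-se companion patch. */
	writel(lower_32_bits(iova), se->base + SE_DMA_RX_PTR_L);
	writel(upper_32_bits(iova), se->base + SE_DMA_RX_PTR_H);
	writel(len, se->base + SE_DMA_RX_LEN);	/* writing the length starts the DMA */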
	} else if (m_cmd & SPI_TX_ONLY) {
		if (geni_spi_handle_tx(mas))
			writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
	}

-unlock_and_return:
	spin_unlock_irq(&mas->lock);
	return ret;
}

static int spi_geni_transfer_one(struct spi_master *spi,
				 struct spi_device *slv,
				 struct spi_transfer *xfer)
{

--- 84 unchanged lines hidden ---

		mas->tx_rem_bytes = 0;
	if (dma_rx_status & RX_DMA_DONE)
		mas->rx_rem_bytes = 0;
	if (dma_tx_status & TX_RESET_DONE)
		complete(&mas->tx_reset_done);
	if (dma_rx_status & RX_RESET_DONE)
		complete(&mas->rx_reset_done);
	if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
-		if (xfer->tx_buf && mas->tx_se_dma) {
-			geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
-			mas->tx_se_dma = 0;
-		}
-		if (xfer->rx_buf && mas->rx_se_dma) {
-			geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
-			mas->rx_se_dma = 0;
-		}
			spi_finalize_current_transfer(spi);
			mas->cur_xfer = NULL;
		}
	}

	if (m_irq & M_CMD_CANCEL_EN)
		complete(&mas->cancel_done);
	if (m_irq & M_CMD_ABORT_EN)

--- 68 unchanged lines hidden ---

	}

	spi->bus_num = -1;
	spi->dev.of_node = dev->of_node;
	spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	spi->num_chipselect = 4;
	spi->max_speed_hz = 50000000;
+	spi->max_dma_len = 0xffff0; /* 24 bits for tx/rx dma length */
	spi->prepare_message = spi_geni_prepare_message;
	spi->transfer_one = spi_geni_transfer_one;
	spi->can_dma = geni_can_dma;
	spi->dma_map_dev = dev->parent;
	spi->auto_runtime_pm = true;
	spi->handle_err = spi_geni_handle_err;
	spi->use_gpio_descriptors = true;

--- 155 unchanged lines hidden ---
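
Note on spi->max_dma_len: it caps the length of any single segment the core maps. The in-code comment points at the 24-bit SE DMA length registers; 0xffff0 (1,048,560 bytes) sits inside that range and is 16-byte aligned, which reads as a deliberate safety margin (an inference, not stated in the diff). A buffer longer than the cap maps into multiple sg entries, which the nents > 1 check earlier then routes back to FIFO mode. Rough arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int max_seg = 0xffff0;		/* 1048560 bytes per segment */
		unsigned int buf = 2 * 1024 * 1024;	/* 2 MiB transfer */

		/* Segments the core's mapping would need, rounding up. */
		unsigned int nents = (buf + max_seg - 1) / max_seg;

		printf("%u segments\n", nents);		/* 3: > 1, so FIFO fallback */
		return 0;
	}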