// SPDX-License-Identifier: GPL-2.0-only
/*
 * PXA2xx SPI DMA engine support.
 *
 * Copyright (C) 2013, 2021 Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/irqreturn.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/types.h>

#include <linux/spi/spi.h>

#include "spi-pxa2xx.h"

struct device;

static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
					     bool error)
{
	struct spi_message *msg = drv_data->controller->cur_msg;

	/*
	 * It is possible that one CPU is handling the ROR interrupt while
	 * another just gets the DMA completion. Completing the same
	 * transfer twice leads to problems, thus we prevent concurrent
	 * calls by using dma_running.
	 */
	if (atomic_dec_and_test(&drv_data->dma_running)) {
		/*
		 * If the other CPU is still handling the ROR interrupt we
		 * might not know about the error yet. So we re-check the
		 * ROR bit here before we clear the status register.
		 */
		if (!error)
			error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;

		/* Clear status & disable interrupts */
		clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
		write_SSSR_CS(drv_data, drv_data->clear_sr);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);

		if (error) {
			/* In case we got an error we disable the SSP now */
			pxa_ssp_disable(drv_data->ssp);
			msg->status = -EIO;
		}

		spi_finalize_current_transfer(drv_data->controller);
	}
}

static void pxa2xx_spi_dma_callback(void *data)
{
	pxa2xx_spi_dma_transfer_complete(data, false);
}

static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
			   enum dma_transfer_direction dir,
			   struct spi_transfer *xfer)
{
	enum dma_slave_buswidth width;
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	struct sg_table *sgt;
	int ret;

	switch (drv_data->n_bytes) {
	case 1:
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 2:
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	default:
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;

	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
		cfg.dst_addr_width = width;
		cfg.dst_maxburst = drv_data->controller_info->dma_burst_size;

		sgt = &xfer->tx_sg;
		chan = drv_data->controller->dma_tx;
	} else {
		cfg.src_addr = drv_data->ssp->phys_base + SSDR;
		cfg.src_addr_width = width;
		cfg.src_maxburst = drv_data->controller_info->dma_burst_size;

		sgt = &xfer->rx_sg;
		chan = drv_data->controller->dma_rx;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
		return NULL;
	}

	return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 status;

	status = read_SSSR_bits(drv_data, drv_data->mask_sr);
	if (status & SSSR_ROR) {
		dev_err(drv_data->ssp->dev, "FIFO overrun\n");

		dmaengine_terminate_async(drv_data->controller->dma_rx);
		dmaengine_terminate_async(drv_data->controller->dma_tx);

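		/* Let the common completion path report the error and finalize the message */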
		pxa2xx_spi_dma_transfer_complete(drv_data, true);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
			   struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
	int err;

	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
	if (!tx_desc) {
		dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
		err = -EBUSY;
		goto err_tx;
	}

	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
	if (!rx_desc) {
		dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
		err = -EBUSY;
		goto err_rx;
	}

	/* We are ready when RX completes */
	rx_desc->callback = pxa2xx_spi_dma_callback;
	rx_desc->callback_param = drv_data;

	dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);
	return 0;

err_rx:
	dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
	return err;
}

void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	dma_async_issue_pending(drv_data->controller->dma_rx);
	dma_async_issue_pending(drv_data->controller->dma_tx);

	atomic_set(&drv_data->dma_running, 1);
}

void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
	atomic_set(&drv_data->dma_running, 0);
	dmaengine_terminate_sync(drv_data->controller->dma_rx);
	dmaengine_terminate_sync(drv_data->controller->dma_tx);
}

int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
	struct spi_controller *controller = drv_data->controller;
	struct device *dev = drv_data->ssp->dev;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	controller->dma_tx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->tx_param, dev, "tx");
	if (!controller->dma_tx)
		return -ENODEV;

	controller->dma_rx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->rx_param, dev, "rx");
	if (!controller->dma_rx) {
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
		return -ENODEV;
	}

	return 0;
}

void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct spi_controller *controller = drv_data->controller;

	if (controller->dma_rx) {
		dmaengine_terminate_sync(controller->dma_rx);
		dma_release_channel(controller->dma_rx);
		controller->dma_rx = NULL;
	}
	if (controller->dma_tx) {
		dmaengine_terminate_sync(controller->dma_tx);
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
	}
}