// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
// Copyright 2020-2025 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-fsl-dspi.h>

#define DRIVER_NAME			"fsl-dspi"

#define SPI_MCR				0x00
#define SPI_MCR_HOST			BIT(31)
#define SPI_MCR_PCSIS(x)		((x) << 16)
#define SPI_MCR_CLR_TXF			BIT(11)
#define SPI_MCR_CLR_RXF			BIT(10)
#define SPI_MCR_XSPI			BIT(3)
#define SPI_MCR_DIS_TXF			BIT(13)
#define SPI_MCR_DIS_RXF			BIT(12)
#define SPI_MCR_HALT			BIT(0)

#define SPI_TCR				0x08
#define SPI_TCR_GET_TCNT(x)		(((x) & GENMASK(31, 16)) >> 16)

#define SPI_CTAR(x)			(0x0c + (((x) & GENMASK(1, 0)) * 4))
#define SPI_CTAR_FMSZ(x)		(((x) << 27) & GENMASK(30, 27))
#define SPI_CTAR_CPOL			BIT(26)
#define SPI_CTAR_CPHA			BIT(25)
#define SPI_CTAR_LSBFE			BIT(24)
#define SPI_CTAR_PCSSCK(x)		(((x) << 22) & GENMASK(23, 22))
#define SPI_CTAR_PASC(x)		(((x) << 20) & GENMASK(21, 20))
#define SPI_CTAR_PDT(x)			(((x) << 18) & GENMASK(19, 18))
#define SPI_CTAR_PBR(x)			(((x) << 16) & GENMASK(17, 16))
#define SPI_CTAR_CSSCK(x)		(((x) << 12) & GENMASK(15, 12))
#define SPI_CTAR_ASC(x)			(((x) << 8) & GENMASK(11, 8))
#define SPI_CTAR_DT(x)			(((x) << 4) & GENMASK(7, 4))
#define SPI_CTAR_BR(x)			((x) & GENMASK(3, 0))
#define SPI_CTAR_SCALE_BITS		0xf

#define SPI_CTAR0_SLAVE			0x0c

#define SPI_SR				0x2c
#define SPI_SR_TCFQF			BIT(31)
#define SPI_SR_TFUF			BIT(27)
#define SPI_SR_TFFF			BIT(25)
#define SPI_SR_CMDTCF			BIT(23)
#define SPI_SR_SPEF			BIT(21)
#define SPI_SR_RFOF			BIT(19)
#define SPI_SR_TFIWF			BIT(18)
#define SPI_SR_RFDF			BIT(17)
#define SPI_SR_CMDFFF			BIT(16)
#define SPI_SR_TXRXS			BIT(30)
#define SPI_SR_CLEAR			(SPI_SR_TCFQF | \
					SPI_SR_TFUF | SPI_SR_TFFF | \
					SPI_SR_CMDTCF | SPI_SR_SPEF | \
					SPI_SR_RFOF | SPI_SR_TFIWF | \
					SPI_SR_RFDF | SPI_SR_CMDFFF)

#define SPI_RSER_TFFFE			BIT(25)
#define SPI_RSER_TFFFD			BIT(24)
#define SPI_RSER_RFDFE			BIT(17)
#define SPI_RSER_RFDFD			BIT(16)

#define SPI_RSER			0x30
#define SPI_RSER_TCFQE			BIT(31)
#define SPI_RSER_CMDTCFE		BIT(23)

#define SPI_PUSHR			0x34
#define SPI_PUSHR_CMD_CONT		BIT(15)
#define SPI_PUSHR_CMD_CTAS(x)		(((x) << 12 & GENMASK(14, 12)))
#define SPI_PUSHR_CMD_EOQ		BIT(11)
#define SPI_PUSHR_CMD_CTCNT		BIT(10)
#define SPI_PUSHR_CMD_PCS(x)		(BIT(x) & GENMASK(5, 0))

#define SPI_PUSHR_SLAVE			0x34

#define SPI_POPR			0x38

#define SPI_TXFR0			0x3c
#define SPI_TXFR1			0x40
#define SPI_TXFR2			0x44
#define SPI_TXFR3			0x48
#define SPI_RXFR0			0x7c
#define SPI_RXFR1			0x80
#define SPI_RXFR2			0x84
#define SPI_RXFR3			0x88

#define SPI_CTARE(x)			(0x11c + (((x) & GENMASK(1, 0)) * 4))
#define SPI_CTARE_FMSZE(x)		(((x) & 0x1) << 16)
#define SPI_CTARE_DTCP(x)		((x) & 0x7ff)

#define SPI_SREX			0x13c

#define SPI_FRAME_BITS(bits)		SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_EBITS(bits)		SPI_CTARE_FMSZE(((bits) - 1) >> 4)
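
/*
 * Worked example (illustrative): for a 27-bit frame, SPI_FRAME_BITS(27)
 * stores the low four bits of 27 - 1 = 26 = 0b11010 in FMSZ (0b1010), and
 * SPI_FRAME_EBITS(27) stores bit 4 in FMSZE (1); the hardware recombines
 * them as (FMSZE << 4 | FMSZ) + 1 = 27 bits per frame.
 */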

#define DMA_COMPLETION_TIMEOUT		msecs_to_jiffies(3000)

struct chip_data {
	u32 ctar_val;
};

enum dspi_trans_mode {
	DSPI_XSPI_MODE,
	DSPI_DMA_MODE,
};

struct fsl_dspi_devtype_data {
	enum dspi_trans_mode	trans_mode;
	u8			max_clock_factor;
	int			fifo_size;
};

enum {
	LS1021A,
	LS1012A,
	LS1028A,
	LS1043A,
	LS1046A,
	LS2080A,
	LS2085A,
	LX2160A,
	MCF5441X,
	VF610,
};

static const struct fsl_dspi_devtype_data devtype_data[] = {
	[VF610] = {
		.trans_mode		= DSPI_DMA_MODE,
		.max_clock_factor	= 2,
		.fifo_size		= 4,
	},
	[LS1021A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LS1012A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
	[LS1028A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LS1043A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
	[LS1046A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
	[LS2080A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LS2085A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[LX2160A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
	},
	[MCF5441X] = {
		.trans_mode		= DSPI_DMA_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
};

struct fsl_dspi_dma {
	u32					*tx_dma_buf;
	struct dma_chan				*chan_tx;
	dma_addr_t				tx_dma_phys;
	struct completion			cmd_tx_complete;
	struct dma_async_tx_descriptor		*tx_desc;

	u32					*rx_dma_buf;
	struct dma_chan				*chan_rx;
	dma_addr_t				rx_dma_phys;
	struct completion			cmd_rx_complete;
	struct dma_async_tx_descriptor		*rx_desc;
};

struct fsl_dspi {
	struct spi_controller			*ctlr;
	struct platform_device			*pdev;

	struct regmap				*regmap;
	struct regmap				*regmap_pushr;
	int					irq;
	struct clk				*clk;

	struct spi_transfer			*cur_transfer;
	struct spi_message			*cur_msg;
	struct chip_data			*cur_chip;
	size_t					progress;
	size_t					len;
	const void				*tx;
	void					*rx;
	u16					tx_cmd;
	const struct fsl_dspi_devtype_data	*devtype_data;

	struct completion			xfer_done;

	struct fsl_dspi_dma			*dma;

	int					oper_word_size;
	int					oper_bits_per_word;

	int					words_in_flight;

	/*
	 * Offsets for CMD and TXDATA within SPI_PUSHR when accessed
	 * individually (in XSPI mode)
	 */
	int					pushr_cmd;
	int					pushr_tx;

	void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
	void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
};
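
/*
 * Illustration of the pushr_cmd/pushr_tx offsets above: SPI_PUSHR holds CMD
 * in bits 31-16 and TXDATA in bits 15-0, so when the register is accessed as
 * two 16-bit halves through regmap_pushr, a little-endian DSPI instance has
 * TXDATA at byte offset 0 and CMD at byte offset 2, while a big-endian
 * instance has the two offsets swapped (see dspi_probe()).
 */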

static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	switch (dspi->oper_word_size) {
	case 1:
		*txdata = *(u8 *)dspi->tx;
		break;
	case 2:
		*txdata = *(u16 *)dspi->tx;
		break;
	case 4:
		*txdata = *(u32 *)dspi->tx;
		break;
	}
	dspi->tx += dspi->oper_word_size;
}

static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	switch (dspi->oper_word_size) {
	case 1:
		*(u8 *)dspi->rx = rxdata;
		break;
	case 2:
		*(u16 *)dspi->rx = rxdata;
		break;
	case 4:
		*(u32 *)dspi->rx = rxdata;
		break;
	}
	dspi->rx += dspi->oper_word_size;
}

static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	*txdata = (__force u32)cpu_to_be32(*(u32 *)dspi->tx);
	dspi->tx += sizeof(u32);
}

static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	*(u32 *)dspi->rx = be32_to_cpu((__force __be32)rxdata);
	dspi->rx += sizeof(u32);
}
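
/*
 * Worked example (illustrative) for the 8on32 helpers above: a TX buffer
 * holding the bytes { 0x11, 0x22, 0x33, 0x44 } is packed into
 * txdata = 0x11223344 on both little- and big-endian hosts, so the first
 * byte in memory goes on the wire first, as the most significant byte.
 */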

static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	*txdata = (__force u32)cpu_to_be16(*(u16 *)dspi->tx);
	dspi->tx += sizeof(u16);
}

static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	*(u16 *)dspi->rx = be16_to_cpu((__force __be16)rxdata);
	dspi->rx += sizeof(u16);
}

static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	u16 hi = *(u16 *)dspi->tx;
	u16 lo = *(u16 *)(dspi->tx + 2);

	*txdata = (u32)hi << 16 | lo;
	dspi->tx += sizeof(u32);
}

static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	u16 hi = rxdata & 0xffff;
	u16 lo = rxdata >> 16;

	*(u16 *)dspi->rx = lo;
	*(u16 *)(dspi->rx + 2) = hi;
	dspi->rx += sizeof(u32);
}
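
/*
 * Worked example (illustrative) for the 16on32 helpers above: two 16-bit TX
 * words { 0x1122, 0x3344 } are packed into txdata = 0x11223344, the first
 * word in memory occupying the upper half. On receive, rxdata = 0xaabbccdd
 * is unpacked to memory order { 0xaabb, 0xccdd }; note that "hi" and "lo"
 * name the destination memory offset, not the bit position within rxdata.
 */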

/*
 * Pop one word from the TX buffer for pushing into the
 * PUSHR register (TX FIFO)
 */
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
	u32 txdata = 0;

	if (dspi->tx)
		dspi->host_to_dev(dspi, &txdata);
	dspi->len -= dspi->oper_word_size;
	return txdata;
}

/* Prepare one TX FIFO entry (txdata plus cmd) */
static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
	u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);

	if (spi_controller_is_target(dspi->ctlr))
		return data;

	if (dspi->len > 0)
		cmd |= SPI_PUSHR_CMD_CONT;
	return cmd << 16 | data;
}
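
/*
 * Illustrative FIFO entry layout for the value returned above: bits 31-16
 * carry the CMD halfword (CONT, CTAS, EOQ, CTCNT, PCS) and bits 15-0 carry
 * the TX data. For example, cmd = 0x8001 (CONT set, PCS0 selected) combined
 * with data = 0x00ab yields the entry 0x800100ab.
 */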

/* Push one word to the RX buffer from the POPR register (RX FIFO) */
static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
	if (!dspi->rx)
		return;
	dspi->dev_to_host(dspi, rxdata);
}

static void dspi_tx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;

	complete(&dma->cmd_tx_complete);
}

static void dspi_rx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;
	int i;

	if (dspi->rx) {
		for (i = 0; i < dspi->words_in_flight; i++)
			dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
	}

	complete(&dma->cmd_rx_complete);
}

static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct device *dev = &dspi->pdev->dev;
	struct fsl_dspi_dma *dma = dspi->dma;
	int time_left;
	int i;

	for (i = 0; i < dspi->words_in_flight; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dspi->words_in_flight *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dspi->words_in_flight *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	if (spi_controller_is_target(dspi->ctlr)) {
		wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
		return 0;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void dspi_setup_accel(struct fsl_dspi *dspi);

static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct spi_message *message = dspi->cur_msg;
	struct device *dev = &dspi->pdev->dev;
	int ret = 0;

	/*
	 * dspi->len gets decremented by dspi_pop_tx_pushr in
	 * dspi_next_xfer_dma_submit
	 */
	while (dspi->len) {
		/* Figure out operational bits-per-word for this chunk */
		dspi_setup_accel(dspi);

		dspi->words_in_flight = dspi->len / dspi->oper_word_size;
		if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
			dspi->words_in_flight = dspi->devtype_data->fifo_size;

		message->actual_length += dspi->words_in_flight *
					  dspi->oper_word_size;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			break;
		}
	}

	return ret;
}

static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	int dma_bufsize = dspi->devtype_data->fifo_size * 2;
	struct device *dev = &dspi->pdev->dev;
	struct dma_slave_config cfg;
	struct fsl_dspi_dma *dma;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->chan_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->chan_rx))
		return dev_err_probe(dev, PTR_ERR(dma->chan_rx), "rx dma channel not available\n");

	dma->chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = dev_err_probe(dev, PTR_ERR(dma->chan_tx), "tx dma channel not available\n");
		goto err_tx_channel;
	}

	dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
					     dma_bufsize, &dma->tx_dma_phys,
					     GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
					     dma_bufsize, &dma->rx_dma_phys,
					     GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err_probe(dev, ret, "can't configure rx dma channel\n");
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err_probe(dev, ret, "can't configure tx dma channel\n");
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

err_slave_config:
	dma_free_coherent(dma->chan_rx->device->dev,
			  dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dma->chan_tx->device->dev,
			  dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}

static void dspi_release_dma(struct fsl_dspi *dspi)
{
	int dma_bufsize = dspi->devtype_data->fifo_size * 2;
	struct fsl_dspi_dma *dma = dspi->dma;

	if (!dma)
		return;

	if (dma->chan_tx) {
		dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
				  dma->tx_dma_buf, dma->tx_dma_phys);
		dma_release_channel(dma->chan_tx);
	}

	if (dma->chan_rx) {
		dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
				  dma->rx_dma_buf, dma->rx_dma_phys);
		dma_release_channel(dma->chan_rx);
	}
}

static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
			   unsigned long clkrate)
{
	/* Valid baud rate pre-scaler values */
	int pbr_tbl[4] = {2, 3, 5, 7};
	int brs[16] = {	2,	4,	6,	8,
			16,	32,	64,	128,
			256,	512,	1024,	2048,
			4096,	8192,	16384,	32768 };
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;

	scale_needed = clkrate / speed_hz;
	if (clkrate % speed_hz)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(brs); i++)
		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
			scale = brs[i] * pbr_tbl[j];
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*br = i;
					*pbr = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find valid baud rate, speed_hz is %d, clkrate is %ld; using the max prescaler value.\n",
			speed_hz, clkrate);
		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
		*br = ARRAY_SIZE(brs) - 1;
	}
}
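
/*
 * Worked example (illustrative) for hz_to_spi_baud() above: with
 * clkrate = 100 MHz and speed_hz = 10 MHz, scale_needed = 10. The smallest
 * PBR * BR product >= 10 is 5 * 2 = 10, so *pbr = 2 (prescaler 5) and
 * *br = 0 (scaler 2), giving SCK = 100 MHz / (5 * 2) = 10 MHz exactly.
 */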

static void ns_delay_scale(char *psc, char *sc, int delay_ns,
			   unsigned long clkrate)
{
	int scale_needed, scale, minscale = INT_MAX;
	int pscale_tbl[4] = {1, 3, 5, 7};
	u32 remainder;
	int i, j;

	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
				   &remainder);
	if (remainder)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
			scale = pscale_tbl[i] * (2 << j);
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*psc = i;
					*sc = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value\n",
			delay_ns, clkrate);
		*psc = ARRAY_SIZE(pscale_tbl) - 1;
		*sc = SPI_CTAR_SCALE_BITS;
	}
}
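
/*
 * Worked example (illustrative) for ns_delay_scale() above: for a 50 ns
 * delay at clkrate = 100 MHz, scale_needed = 5. Candidate scales are
 * pscale_tbl[i] * 2^(j + 1); the smallest one >= 5 is 3 * 2 = 6, so
 * *psc = 1 (prescaler 3) and *sc = 0, for an actual delay of
 * 6 / 100 MHz = 60 ns.
 */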

static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
	/*
	 * The only time when the PCS doesn't need continuation after this word
	 * is when it's last. We need to look ahead, because we actually call
	 * dspi_pop_tx (the function that decrements dspi->len) _after_
	 * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
	 * word is enough. If there's more to transmit than that,
	 * dspi_xspi_write will know to split the FIFO writes in 2, and
	 * generate a new PUSHR command with the final word that will have PCS
	 * deasserted (not continued) here.
	 */
	if (dspi->len > dspi->oper_word_size)
		cmd |= SPI_PUSHR_CMD_CONT;
	regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
}

static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
{
	regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
}

static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
{
	int num_bytes = num_words * dspi->oper_word_size;
	u16 tx_cmd = dspi->tx_cmd;

	/*
	 * If the PCS needs to de-assert (i.e. we're at the end of the buffer
	 * and cs_change does not want the PCS to stay on), then we need a new
	 * PUSHR command, since this one (for the body of the buffer)
	 * necessarily has the CONT bit set.
	 * So send one word less during this go, to force a split and a command
	 * with a single word next time, when CONT will be unset.
	 */
	if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
		tx_cmd |= SPI_PUSHR_CMD_EOQ;

	/* Update CTARE */
	regmap_write(dspi->regmap, SPI_CTARE(0),
		     SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
		     SPI_CTARE_DTCP(num_words));

	/*
	 * Write the CMD FIFO entry first, and then the two
	 * corresponding TX FIFO entries (or one...).
	 */
	dspi_pushr_cmd_write(dspi, tx_cmd);

	/* Fill TX FIFO with as many transfers as possible */
	while (num_words--) {
		u32 data = dspi_pop_tx(dspi);

		dspi_pushr_txdata_write(dspi, data & 0xFFFF);
		if (dspi->oper_bits_per_word > 16)
			dspi_pushr_txdata_write(dspi, data >> 16);
	}
}
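
/*
 * Worked example (illustrative) for dspi_xspi_fifo_write() above: pushing
 * the 32-bit word 0xdeadbeef takes two 16-bit TXDATA writes, 0xbeef first
 * and then 0xdead, after the single CMD write that the DTCP preload applies
 * to all num_words transfers of the burst.
 */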

static u32 dspi_popr_read(struct fsl_dspi *dspi)
{
	u32 rxdata = 0;

	regmap_read(dspi->regmap, SPI_POPR, &rxdata);
	return rxdata;
}

static void dspi_fifo_read(struct fsl_dspi *dspi)
{
	int num_fifo_entries = dspi->words_in_flight;

	/* Read one FIFO entry and push to rx buffer */
	while (num_fifo_entries--)
		dspi_push_rx(dspi, dspi_popr_read(dspi));
}

static void dspi_setup_accel(struct fsl_dspi *dspi)
{
	struct spi_transfer *xfer = dspi->cur_transfer;
	bool odd = !!(dspi->len & 1);

	/* No accel for frames not multiple of 8 bits at the moment */
	if (xfer->bits_per_word % 8)
		goto no_accel;

	if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
		dspi->oper_bits_per_word = 16;
	} else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
		dspi->oper_bits_per_word = 8;
	} else {
		/* Start off with maximum supported by hardware */
		if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
			dspi->oper_bits_per_word = 32;
		else
			dspi->oper_bits_per_word = 16;

		/*
		 * And go down only if the buffer can't be sent with
		 * words this big
		 */
		do {
			if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
				break;

			dspi->oper_bits_per_word /= 2;
		} while (dspi->oper_bits_per_word > 8);
	}

	if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_8on32_dev_to_host;
		dspi->host_to_dev = dspi_8on32_host_to_dev;
	} else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
		dspi->dev_to_host = dspi_8on16_dev_to_host;
		dspi->host_to_dev = dspi_8on16_host_to_dev;
	} else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_16on32_dev_to_host;
		dspi->host_to_dev = dspi_16on32_host_to_dev;
	} else {
no_accel:
		dspi->dev_to_host = dspi_native_dev_to_host;
		dspi->host_to_dev = dspi_native_host_to_dev;
		dspi->oper_bits_per_word = xfer->bits_per_word;
	}

	dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);

	/*
	 * Update CTAR here (code is common for XSPI and DMA modes).
	 * We will update CTARE in the portion specific to XSPI, when we
	 * also know the preload value (DTCP).
	 */
	regmap_write(dspi->regmap, SPI_CTAR(0),
		     dspi->cur_chip->ctar_val |
		     SPI_FRAME_BITS(dspi->oper_bits_per_word));
}
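
/*
 * Worked example (illustrative) for dspi_setup_accel() above: a 10-byte
 * buffer at 8 bits-per-word on a 4-entry FIFO is even-sized but larger than
 * fifo_size * 2, so in XSPI mode it is accelerated to
 * oper_bits_per_word = 32 using the 8on32 helpers; the 2 bytes left over
 * after two 32-bit words are sent on a later pass with a smaller
 * operational word size.
 */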

static void dspi_fifo_write(struct fsl_dspi *dspi)
{
	int num_fifo_entries = dspi->devtype_data->fifo_size;
	struct spi_transfer *xfer = dspi->cur_transfer;
	struct spi_message *msg = dspi->cur_msg;
	int num_words, num_bytes;

	dspi_setup_accel(dspi);

	/* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
	if (dspi->oper_word_size == 4)
		num_fifo_entries /= 2;

	/*
	 * Integer division intentionally trims off odd (or non-multiple of 4)
	 * numbers of bytes at the end of the buffer, which will be sent next
	 * time using a smaller oper_word_size.
	 */
	num_words = dspi->len / dspi->oper_word_size;
	if (num_words > num_fifo_entries)
		num_words = num_fifo_entries;

	/* Update total number of bytes that were transferred */
	num_bytes = num_words * dspi->oper_word_size;
	msg->actual_length += num_bytes;
	dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);

	/*
	 * Update shared variable for use in the next interrupt (both in
	 * dspi_fifo_read and in dspi_fifo_write).
	 */
	dspi->words_in_flight = num_words;

	spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);

	dspi_xspi_fifo_write(dspi, num_words);
	/*
	 * Everything after this point is in a potential race with the next
	 * interrupt, so we must never use dspi->words_in_flight again since it
	 * might already be modified by the next dspi_fifo_write.
	 */

	spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
				dspi->progress, !dspi->irq);
}

static int dspi_rxtx(struct fsl_dspi *dspi)
{
	dspi_fifo_read(dspi);

	if (!dspi->len)
		/* Success! */
		return 0;

	dspi_fifo_write(dspi);

	return -EINPROGRESS;
}

static int dspi_poll(struct fsl_dspi *dspi)
{
	int tries = 1000;
	u32 spi_sr;

	do {
		regmap_read(dspi->regmap, SPI_SR, &spi_sr);
		regmap_write(dspi->regmap, SPI_SR, spi_sr);

		if (spi_sr & SPI_SR_CMDTCF)
			break;
	} while (--tries);

	if (!tries)
		return -ETIMEDOUT;

	return dspi_rxtx(dspi);
}

static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	u32 spi_sr;

	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);

	if (!(spi_sr & SPI_SR_CMDTCF))
		return IRQ_NONE;

	if (dspi_rxtx(dspi) == 0)
		complete(&dspi->xfer_done);

	return IRQ_HANDLED;
}

static void dspi_assert_cs(struct spi_device *spi, bool *cs)
{
	if (!spi_get_csgpiod(spi, 0) || *cs)
		return;

	gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), true);
	*cs = true;
}

static void dspi_deassert_cs(struct spi_device *spi, bool *cs)
{
	if (!spi_get_csgpiod(spi, 0) || !*cs)
		return;

	gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), false);
	*cs = false;
}

static int dspi_transfer_one_message(struct spi_controller *ctlr,
				     struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	bool cs = false;
	int status = 0;
	u32 val = 0;
	bool cs_change = false;

	message->actual_length = 0;

	/* Put DSPI in running mode if halted. */
	regmap_read(dspi->regmap, SPI_MCR, &val);
	if (val & SPI_MCR_HALT) {
		regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0);
		while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
		       !(val & SPI_SR_TXRXS))
			;
	}

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);

		dspi_assert_cs(spi, &cs);

		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0);
		if (!spi_get_csgpiod(spi, 0))
			dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi_get_chipselect(spi, 0));

		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		cs_change = transfer->cs_change;
		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->len = transfer->len;
		dspi->progress = 0;

		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

		regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);

		spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
				       dspi->progress, !dspi->irq);

		if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
			status = dspi_dma_xfer(dspi);
		} else {
			/*
			 * Reinitialize the completion before transferring data
			 * to avoid the case where it might remain in the done
			 * state due to a spurious interrupt from a previous
			 * transfer. This could falsely signal that the current
			 * transfer has completed.
			 */
			if (dspi->irq)
				reinit_completion(&dspi->xfer_done);

			dspi_fifo_write(dspi);

			if (dspi->irq) {
				wait_for_completion(&dspi->xfer_done);
			} else {
				do {
					status = dspi_poll(dspi);
				} while (status == -EINPROGRESS);
			}
		}
		if (status)
			break;

		spi_transfer_delay_exec(transfer);

		if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT))
			dspi_deassert_cs(spi, &cs);
	}

	if (status || !cs_change) {
		/* Put DSPI in stop mode */
		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_HALT, SPI_MCR_HALT);
		while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
		       val & SPI_SR_TXRXS)
			;
	}

	message->status = status;
	spi_finalize_current_message(ctlr);

	return status;
}

static int dspi_setup(struct spi_device *spi)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
	u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	struct fsl_dspi_platform_data *pdata;
	unsigned char pasc = 0, asc = 0;
	struct gpio_desc *gpio_cs;
	struct chip_data *chip;
	unsigned long clkrate;
	bool cs = true;
	int val;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	pdata = dev_get_platdata(&dspi->pdev->dev);

	if (!pdata) {
		val = spi_delay_to_ns(&spi->cs_setup, NULL);
		cs_sck_delay = val >= 0 ? val : 0;
		if (!cs_sck_delay)
			of_property_read_u32(spi->dev.of_node,
					     "fsl,spi-cs-sck-delay",
					     &cs_sck_delay);

		val = spi_delay_to_ns(&spi->cs_hold, NULL);
		sck_cs_delay = val >= 0 ? val : 0;
		if (!sck_cs_delay)
			of_property_read_u32(spi->dev.of_node,
					     "fsl,spi-sck-cs-delay",
					     &sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	/* Since tCSC and tASC apply to continuous transfers too, avoid SCK
	 * glitches of half a cycle by never allowing tCSC + tASC to go below
	 * half a SCK period.
	 */
	if (cs_sck_delay < quarter_period_ns)
		cs_sck_delay = quarter_period_ns;
	if (sck_cs_delay < quarter_period_ns)
		sck_cs_delay = quarter_period_ns;
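
	/*
	 * Worked example (illustrative): at max_speed_hz = 10 MHz the SCK
	 * period is 100 ns, so quarter_period_ns = 25 ns and both delays are
	 * clamped to at least 25 ns, keeping tCSC + tASC at or above half a
	 * SCK period (50 ns).
	 */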

	dev_dbg(&spi->dev,
		"DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
		cs_sck_delay, sck_cs_delay);

	clkrate = clk_get_rate(dspi->clk);
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = 0;
	if (spi->mode & SPI_CPOL)
		chip->ctar_val |= SPI_CTAR_CPOL;
	if (spi->mode & SPI_CPHA)
		chip->ctar_val |= SPI_CTAR_CPHA;

	if (!spi_controller_is_target(dspi->ctlr)) {
		chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
				  SPI_CTAR_CSSCK(cssck) |
				  SPI_CTAR_PASC(pasc) |
				  SPI_CTAR_ASC(asc) |
				  SPI_CTAR_PBR(pbr) |
				  SPI_CTAR_BR(br);

		if (spi->mode & SPI_LSB_FIRST)
			chip->ctar_val |= SPI_CTAR_LSBFE;
	}

	gpio_cs = spi_get_csgpiod(spi, 0);
	if (gpio_cs)
		gpiod_direction_output(gpio_cs, false);

	dspi_deassert_cs(spi, &cs);

	spi_set_ctldata(spi, chip);

	return 0;
}

static void dspi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
		spi->controller->bus_num, spi_get_chipselect(spi, 0));

	kfree(chip);
}

static const struct of_device_id fsl_dspi_dt_ids[] = {
	{
		.compatible = "fsl,vf610-dspi",
		.data = &devtype_data[VF610],
	}, {
		.compatible = "fsl,ls1021a-v1.0-dspi",
		.data = &devtype_data[LS1021A],
	}, {
		.compatible = "fsl,ls1012a-dspi",
		.data = &devtype_data[LS1012A],
	}, {
		.compatible = "fsl,ls1028a-dspi",
		.data = &devtype_data[LS1028A],
	}, {
		.compatible = "fsl,ls1043a-dspi",
		.data = &devtype_data[LS1043A],
	}, {
		.compatible = "fsl,ls1046a-dspi",
		.data = &devtype_data[LS1046A],
	}, {
		.compatible = "fsl,ls2080a-dspi",
		.data = &devtype_data[LS2080A],
	}, {
		.compatible = "fsl,ls2085a-dspi",
		.data = &devtype_data[LS2085A],
	}, {
		.compatible = "fsl,lx2160a-dspi",
		.data = &devtype_data[LX2160A],
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);

#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
	struct fsl_dspi *dspi = dev_get_drvdata(dev);

	if (dspi->irq)
		disable_irq(dspi->irq);
	spi_controller_suspend(dspi->ctlr);
	clk_disable_unprepare(dspi->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dspi_resume(struct device *dev)
{
	struct fsl_dspi *dspi = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;
	spi_controller_resume(dspi->ctlr);
	if (dspi->irq)
		enable_irq(dspi->irq);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);

static const struct regmap_range dspi_yes_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_MCR),
	regmap_reg_range(SPI_TCR, SPI_CTAR(3)),
	regmap_reg_range(SPI_SR, SPI_TXFR3),
	regmap_reg_range(SPI_RXFR0, SPI_RXFR3),
	regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_access_table = {
	.yes_ranges	= dspi_yes_ranges,
	.n_yes_ranges	= ARRAY_SIZE(dspi_yes_ranges),
};

static const struct regmap_range dspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
};

static const struct regmap_access_table dspi_volatile_table = {
	.yes_ranges	= dspi_volatile_ranges,
	.n_yes_ranges	= ARRAY_SIZE(dspi_volatile_ranges),
};

static const struct regmap_config dspi_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= 0x88,
	.volatile_table	= &dspi_volatile_table,
	.rd_table	= &dspi_access_table,
	.wr_table	= &dspi_access_table,
};

static const struct regmap_range dspi_xspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_xspi_volatile_table = {
	.yes_ranges	= dspi_xspi_volatile_ranges,
	.n_yes_ranges	= ARRAY_SIZE(dspi_xspi_volatile_ranges),
};

static const struct regmap_config dspi_xspi_regmap_config[] = {
	{
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
		.max_register	= 0x13c,
		.volatile_table	= &dspi_xspi_volatile_table,
		.rd_table	= &dspi_access_table,
		.wr_table	= &dspi_access_table,
	},
	{
		.name		= "pushr",
		.reg_bits	= 16,
		.val_bits	= 16,
		.reg_stride	= 2,
		.max_register	= 0x2,
	},
};

static int dspi_init(struct fsl_dspi *dspi)
{
	unsigned int mcr;

	/* Set idle states for all chip select signals to high */
	mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0));

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		mcr |= SPI_MCR_XSPI;
	if (!spi_controller_is_target(dspi->ctlr))
		mcr |= SPI_MCR_HOST;

	mcr |= SPI_MCR_HALT;

	regmap_write(dspi->regmap, SPI_MCR, mcr);
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);

	switch (dspi->devtype_data->trans_mode) {
	case DSPI_XSPI_MODE:
		regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
		break;
	case DSPI_DMA_MODE:
		regmap_write(dspi->regmap, SPI_RSER,
			     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
			     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
		break;
	default:
		dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
			dspi->devtype_data->trans_mode);
		return -EINVAL;
	}

	return 0;
}

static int dspi_target_abort(struct spi_controller *host)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(host);

	/*
	 * Terminate all pending DMA transactions for the SPI working
	 * in TARGET mode.
	 */
	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		dmaengine_terminate_sync(dspi->dma->chan_rx);
		dmaengine_terminate_sync(dspi->dma->chan_tx);
	}

	/* Clear the internal DSPI RX and TX FIFO buffers */
	regmap_update_bits(dspi->regmap, SPI_MCR,
			   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
			   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

	return 0;
}

static int dspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct regmap_config *regmap_config;
	struct fsl_dspi_platform_data *pdata;
	struct spi_controller *ctlr;
	int ret, cs_num, bus_num = -1;
	struct fsl_dspi *dspi;
	struct resource *res;
	void __iomem *base;
	bool big_endian;

	dspi = devm_kzalloc(&pdev->dev, sizeof(*dspi), GFP_KERNEL);
	if (!dspi)
		return -ENOMEM;

	ctlr = spi_alloc_host(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	spi_controller_set_devdata(ctlr, dspi);
	platform_set_drvdata(pdev, dspi);

	dspi->pdev = pdev;
	dspi->ctlr = ctlr;

	ctlr->setup = dspi_setup;
	ctlr->transfer_one_message = dspi_transfer_one_message;
	ctlr->dev.of_node = pdev->dev.of_node;

	ctlr->cleanup = dspi_cleanup;
	ctlr->target_abort = dspi_target_abort;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	ctlr->use_gpio_descriptors = true;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num;
		ctlr->bus_num = pdata->bus_num;

		/* Only Coldfire uses platform data */
		dspi->devtype_data = &devtype_data[MCF5441X];
		big_endian = true;
	} else {
		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
			goto out_ctlr_put;
		}
		ctlr->num_chipselect = ctlr->max_native_cs = cs_num;

		of_property_read_u32(np, "bus-num", &bus_num);
		ctlr->bus_num = bus_num;

		if (of_property_read_bool(np, "spi-slave"))
			ctlr->target = true;

		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
		if (!dspi->devtype_data) {
			dev_err(&pdev->dev, "can't get devtype_data\n");
			ret = -EFAULT;
			goto out_ctlr_put;
		}

		big_endian = of_device_is_big_endian(np);
	}
	if (big_endian) {
		dspi->pushr_cmd = 0;
		dspi->pushr_tx = 2;
	} else {
		dspi->pushr_cmd = 2;
		dspi->pushr_tx = 0;
	}

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out_ctlr_put;
	}

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		regmap_config = &dspi_xspi_regmap_config[0];
	else
		regmap_config = &dspi_regmap_config;
	dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
	if (IS_ERR(dspi->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
			PTR_ERR(dspi->regmap));
		ret = PTR_ERR(dspi->regmap);
		goto out_ctlr_put;
	}

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
		dspi->regmap_pushr = devm_regmap_init_mmio(
			&pdev->dev, base + SPI_PUSHR,
			&dspi_xspi_regmap_config[1]);
		if (IS_ERR(dspi->regmap_pushr)) {
			dev_err(&pdev->dev,
				"failed to init pushr regmap: %ld\n",
				PTR_ERR(dspi->regmap_pushr));
			ret = PTR_ERR(dspi->regmap_pushr);
			goto out_ctlr_put;
		}
	}

	dspi->clk = devm_clk_get_enabled(&pdev->dev, "dspi");
	if (IS_ERR(dspi->clk)) {
		ret = PTR_ERR(dspi->clk);
		dev_err(&pdev->dev, "unable to get clock\n");
		goto out_ctlr_put;
	}

	ret = dspi_init(dspi);
	if (ret)
		goto out_ctlr_put;

	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq <= 0) {
		dev_info(&pdev->dev,
			 "can't get platform irq, using poll mode\n");
		dspi->irq = 0;
		goto poll_mode;
	}

	init_completion(&dspi->xfer_done);

	ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
				   IRQF_SHARED, pdev->name, dspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
		goto out_ctlr_put;
	}

poll_mode:

	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		ret = dspi_request_dma(dspi, res->start);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get dma channels\n");
			goto out_free_irq;
		}
	}

	ctlr->max_speed_hz =
		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

	if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
		ctlr->ptp_sts_supported = true;

	ret = spi_register_controller(ctlr);
	if (ret != 0) {
		dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
		goto out_release_dma;
	}

	return ret;

out_release_dma:
	dspi_release_dma(dspi);
out_free_irq:
	if (dspi->irq)
		free_irq(dspi->irq, dspi);
out_ctlr_put:
	spi_controller_put(ctlr);

	return ret;
}

static void dspi_remove(struct platform_device *pdev)
{
	struct fsl_dspi *dspi = platform_get_drvdata(pdev);

	/* Disconnect from the SPI framework */
	spi_unregister_controller(dspi->ctlr);

	/* Disable RX and TX */
	regmap_update_bits(dspi->regmap, SPI_MCR,
			   SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
			   SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);

	/* Stop Running */
	regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);

	dspi_release_dma(dspi);
	if (dspi->irq)
		free_irq(dspi->irq, dspi);
}

static void dspi_shutdown(struct platform_device *pdev)
{
	dspi_remove(pdev);
}

static struct platform_driver fsl_dspi_driver = {
	.driver.name		= DRIVER_NAME,
	.driver.of_match_table	= fsl_dspi_dt_ids,
	.driver.pm		= &dspi_pm,
	.probe			= dspi_probe,
	.remove			= dspi_remove,
	.shutdown		= dspi_shutdown,
};
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);