// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
// Copyright 2020-2025 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-fsl-dspi.h>

#define DRIVER_NAME "fsl-dspi"

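/*
 * Register offsets and bit fields of the DSPI memory map. The names
 * follow the DSPI chapter of the Freescale/NXP reference manuals:
 * MCR (module configuration), TCR (transfer count), CTAR (clock and
 * transfer attributes), SR (status), RSER (DMA/interrupt request
 * select and enable), PUSHR/POPR (TX/RX FIFO access), and the
 * CTARE/SREX extensions used in XSPI mode.
 */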
#define SPI_MCR 0x00
#define SPI_MCR_HOST BIT(31)
#define SPI_MCR_MTFE BIT(26)
#define SPI_MCR_PCSIS(x) ((x) << 16)
#define SPI_MCR_CLR_TXF BIT(11)
#define SPI_MCR_CLR_RXF BIT(10)
#define SPI_MCR_XSPI BIT(3)
#define SPI_MCR_DIS_TXF BIT(13)
#define SPI_MCR_DIS_RXF BIT(12)
#define SPI_MCR_HALT BIT(0)

#define SPI_TCR 0x08
#define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16)

#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(2, 0)) * 4))
#define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27))
#define SPI_CTAR_DBR BIT(31)
#define SPI_CTAR_CPOL BIT(26)
#define SPI_CTAR_CPHA BIT(25)
#define SPI_CTAR_LSBFE BIT(24)
#define SPI_CTAR_PCSSCK(x) (((x) << 22) & GENMASK(23, 22))
#define SPI_CTAR_PASC(x) (((x) << 20) & GENMASK(21, 20))
#define SPI_CTAR_PDT(x) (((x) << 18) & GENMASK(19, 18))
#define SPI_CTAR_PBR(x) (((x) << 16) & GENMASK(17, 16))
#define SPI_CTAR_CSSCK(x) (((x) << 12) & GENMASK(15, 12))
#define SPI_CTAR_ASC(x) (((x) << 8) & GENMASK(11, 8))
#define SPI_CTAR_DT(x) (((x) << 4) & GENMASK(7, 4))
#define SPI_CTAR_BR(x) ((x) & GENMASK(3, 0))
#define SPI_CTAR_SCALE_BITS 0xf

#define SPI_CTAR0_SLAVE 0x0c

#define SPI_SR 0x2c
#define SPI_SR_TCFQF BIT(31)
#define SPI_SR_TXRXS BIT(30)
#define SPI_SR_TFUF BIT(27)
#define SPI_SR_TFFF BIT(25)
#define SPI_SR_CMDTCF BIT(23)
#define SPI_SR_SPEF BIT(21)
#define SPI_SR_RFOF BIT(19)
#define SPI_SR_TFIWF BIT(18)
#define SPI_SR_RFDF BIT(17)
#define SPI_SR_CMDFFF BIT(16)
#define SPI_SR_CLEAR (SPI_SR_TCFQF | \
		SPI_SR_TFUF | SPI_SR_TFFF | \
		SPI_SR_CMDTCF | SPI_SR_SPEF | \
		SPI_SR_RFOF | SPI_SR_TFIWF | \
		SPI_SR_RFDF | SPI_SR_CMDFFF)

#define SPI_RSER 0x30
#define SPI_RSER_TCFQE BIT(31)
#define SPI_RSER_TFFFE BIT(25)
#define SPI_RSER_TFFFD BIT(24)
#define SPI_RSER_CMDTCFE BIT(23)
#define SPI_RSER_RFDFE BIT(17)
#define SPI_RSER_RFDFD BIT(16)

#define SPI_PUSHR 0x34
#define SPI_PUSHR_CMD_CONT BIT(15)
#define SPI_PUSHR_CMD_CTAS(x) (((x) << 12) & GENMASK(14, 12))
#define SPI_PUSHR_CMD_EOQ BIT(11)
#define SPI_PUSHR_CMD_CTCNT BIT(10)
#define SPI_PUSHR_CMD_PCS(x) (BIT(x) & GENMASK(5, 0))

#define SPI_PUSHR_SLAVE 0x34

#define SPI_POPR 0x38

#define SPI_TXFR0 0x3c
#define SPI_TXFR1 0x40
#define SPI_TXFR2 0x44
#define SPI_TXFR3 0x48
#define SPI_TXFR4 0x4c
#define SPI_RXFR0 0x7c
#define SPI_RXFR1 0x80
#define SPI_RXFR2 0x84
#define SPI_RXFR3 0x88
#define SPI_RXFR4 0x8c

#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(2, 0)) * 4))
#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)

#define SPI_SREX 0x13c

#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)

#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)

#define SPI_25MHZ 25000000

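/*
 * Per chip select state, computed by dspi_setup() and cached as SPI
 * controller data: the CTAR value encoding clock polarity/phase, bit
 * order, baud rate and delay scalers for one peripheral.
 */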
struct chip_data {
	u32 ctar_val;
};

enum dspi_trans_mode {
	DSPI_XSPI_MODE,
	DSPI_DMA_MODE,
};

struct fsl_dspi_devtype_data {
	enum dspi_trans_mode trans_mode;
	u8 max_clock_factor;
	int fifo_size;
	const struct regmap_config *regmap;
};

enum {
	LS1021A,
	LS1012A,
	LS1028A,
	LS1043A,
	LS1046A,
	LS2080A,
	LS2085A,
	LX2160A,
	MCF5441X,
	VF610,
	S32G,
	S32G_TARGET,
};

static const struct regmap_range dspi_yes_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_MCR),
	regmap_reg_range(SPI_TCR, SPI_CTAR(3)),
	regmap_reg_range(SPI_SR, SPI_TXFR3),
	regmap_reg_range(SPI_RXFR0, SPI_RXFR3),
	regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_range s32g_dspi_yes_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_MCR),
	regmap_reg_range(SPI_TCR, SPI_CTAR(5)),
	regmap_reg_range(SPI_SR, SPI_TXFR4),
	regmap_reg_range(SPI_RXFR0, SPI_RXFR4),
	regmap_reg_range(SPI_CTARE(0), SPI_CTARE(5)),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_access_table = {
	.yes_ranges = dspi_yes_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges),
};

static const struct regmap_access_table s32g_dspi_access_table = {
	.yes_ranges = s32g_dspi_yes_ranges,
	.n_yes_ranges = ARRAY_SIZE(s32g_dspi_yes_ranges),
};

static const struct regmap_range dspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR4),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_volatile_table = {
	.yes_ranges = dspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
};

enum {
	DSPI_REGMAP,
	S32G_DSPI_REGMAP,
	DSPI_XSPI_REGMAP,
	S32G_DSPI_XSPI_REGMAP,
	DSPI_PUSHR,
};

static const struct regmap_config dspi_regmap_config[] = {
	[DSPI_REGMAP] = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = SPI_RXFR3,
		.volatile_table = &dspi_volatile_table,
		.rd_table = &dspi_access_table,
		.wr_table = &dspi_access_table,
	},
	[S32G_DSPI_REGMAP] = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = SPI_RXFR4,
		.volatile_table = &dspi_volatile_table,
		.wr_table = &s32g_dspi_access_table,
		.rd_table = &s32g_dspi_access_table,
	},
	[DSPI_XSPI_REGMAP] = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = SPI_SREX,
		.volatile_table = &dspi_volatile_table,
		.rd_table = &dspi_access_table,
		.wr_table = &dspi_access_table,
	},
	[S32G_DSPI_XSPI_REGMAP] = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = SPI_SREX,
		.volatile_table = &dspi_volatile_table,
		.wr_table = &s32g_dspi_access_table,
		.rd_table = &s32g_dspi_access_table,
	},
	[DSPI_PUSHR] = {
		.name = "pushr",
		.reg_bits = 16,
		.val_bits = 16,
		.reg_stride = 2,
		.max_register = 0x2,
	},
};

static const struct fsl_dspi_devtype_data devtype_data[] = {
	[VF610] = {
		.trans_mode = DSPI_DMA_MODE,
		.max_clock_factor = 2,
		.fifo_size = 4,
		.regmap = &dspi_regmap_config[DSPI_REGMAP],
	},
	[LS1021A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 4,
		.regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS1012A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 16,
		.regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS1028A] = {
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 4,
		.regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS1043A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 16,
		.regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS1046A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 16,
		.regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS2080A] = {
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 4,
		.regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS2085A] = {
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 4,
		.regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LX2160A] = {
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 4,
		.regmap = &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[MCF5441X] = {
		.trans_mode = DSPI_DMA_MODE,
		.max_clock_factor = 8,
		.fifo_size = 16,
		.regmap = &dspi_regmap_config[DSPI_REGMAP],
	},
	[S32G] = {
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 1,
		.fifo_size = 5,
		.regmap = &dspi_regmap_config[S32G_DSPI_XSPI_REGMAP],
	},
	[S32G_TARGET] = {
		.trans_mode = DSPI_DMA_MODE,
		.max_clock_factor = 1,
		.fifo_size = 5,
		.regmap = &dspi_regmap_config[S32G_DSPI_REGMAP],
	},
};

struct fsl_dspi_dma {
	u32 *tx_dma_buf;
	struct dma_chan *chan_tx;
	dma_addr_t tx_dma_phys;
	struct completion cmd_tx_complete;
	struct dma_async_tx_descriptor *tx_desc;

	u32 *rx_dma_buf;
	struct dma_chan *chan_rx;
	dma_addr_t rx_dma_phys;
	struct completion cmd_rx_complete;
	struct dma_async_tx_descriptor *rx_desc;
};

struct fsl_dspi {
	struct spi_controller *ctlr;
	struct platform_device *pdev;

	struct regmap *regmap;
	struct regmap *regmap_pushr;
	int irq;
	struct clk *clk;

	struct spi_transfer *cur_transfer;
	struct spi_message *cur_msg;
	struct chip_data *cur_chip;
	size_t progress;
	size_t len;
	const void *tx;
	void *rx;
	u16 tx_cmd;
	bool mtf_enabled;
	const struct fsl_dspi_devtype_data *devtype_data;

	struct completion xfer_done;

	struct fsl_dspi_dma *dma;

	int oper_word_size;
	int oper_bits_per_word;

	int words_in_flight;

	/*
	 * Offsets for CMD and TXDATA within SPI_PUSHR when accessed
	 * individually (in XSPI mode)
	 */
	int pushr_cmd;
	int pushr_tx;

	void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
	void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
};

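/*
 * S32G has two devtype entries: one for host mode (XSPI transfers) and
 * one for target mode (DMA transfers). The probe function switches to
 * S32G_TARGET when the controller is configured as a target, so both
 * entries must be matched here.
 */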
static bool is_s32g_dspi(struct fsl_dspi *data)
{
	return data->devtype_data == &devtype_data[S32G] ||
	       data->devtype_data == &devtype_data[S32G_TARGET];
}

static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	switch (dspi->oper_word_size) {
	case 1:
		*txdata = *(u8 *)dspi->tx;
		break;
	case 2:
		*txdata = *(u16 *)dspi->tx;
		break;
	case 4:
		*txdata = *(u32 *)dspi->tx;
		break;
	}
	dspi->tx += dspi->oper_word_size;
}

static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	switch (dspi->oper_word_size) {
	case 1:
		*(u8 *)dspi->rx = rxdata;
		break;
	case 2:
		*(u16 *)dspi->rx = rxdata;
		break;
	case 4:
		*(u32 *)dspi->rx = rxdata;
		break;
	}
	dspi->rx += dspi->oper_word_size;
}

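/*
 * The helpers below implement the transfer acceleration chosen by
 * dspi_setup_accel(): several narrow software frames are packed into
 * one wider hardware word. Packing is big-endian with respect to the
 * buffer, so bytes still appear on the wire in buffer order: e.g. four
 * 8-bit frames {0x11, 0x22, 0x33, 0x44} go out as the single 32-bit
 * word 0x11223344.
 */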
static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	*txdata = (__force u32)cpu_to_be32(*(u32 *)dspi->tx);
	dspi->tx += sizeof(u32);
}

static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	*(u32 *)dspi->rx = be32_to_cpu((__force __be32)rxdata);
	dspi->rx += sizeof(u32);
}

static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	*txdata = (__force u32)cpu_to_be16(*(u16 *)dspi->tx);
	dspi->tx += sizeof(u16);
}

static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	*(u16 *)dspi->rx = be16_to_cpu((__force __be16)rxdata);
	dspi->rx += sizeof(u16);
}

static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	u16 hi = *(u16 *)dspi->tx;
	u16 lo = *(u16 *)(dspi->tx + 2);

	*txdata = (u32)hi << 16 | lo;
	dspi->tx += sizeof(u32);
}

static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	u16 hi = rxdata & 0xffff;
	u16 lo = rxdata >> 16;

	*(u16 *)dspi->rx = lo;
	*(u16 *)(dspi->rx + 2) = hi;
	dspi->rx += sizeof(u32);
}

/*
 * Pop one word from the TX buffer for pushing into the
 * PUSHR register (TX FIFO)
 */
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
	u32 txdata = 0;

	if (dspi->tx)
		dspi->host_to_dev(dspi, &txdata);
	dspi->len -= dspi->oper_word_size;
	return txdata;
}

/* Prepare one TX FIFO entry (txdata plus cmd) */
static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
	u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);

	if (spi_controller_is_target(dspi->ctlr))
		return data;

	if (dspi->len > 0)
		cmd |= SPI_PUSHR_CMD_CONT;
	return cmd << 16 | data;
}

/* Push one word to the RX buffer from the POPR register (RX FIFO) */
static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
	if (!dspi->rx)
		return;
	dspi->dev_to_host(dspi, rxdata);
}

static void dspi_tx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;

	complete(&dma->cmd_tx_complete);
}

static void dspi_rx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;
	int i;

	if (dspi->rx) {
		for (i = 0; i < dspi->words_in_flight; i++)
			dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
	}

	complete(&dma->cmd_rx_complete);
}

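/*
 * Submit one burst of up to words_in_flight PUSHR words: the TX channel
 * copies the prepared command+data words from tx_dma_buf into SPI_PUSHR
 * while the RX channel drains SPI_POPR into rx_dma_buf. In host mode we
 * wait with a timeout for both channels; in target mode we only wait
 * (interruptibly) for RX, since the remote host drives the clock.
 */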
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct device *dev = &dspi->pdev->dev;
	struct fsl_dspi_dma *dma = dspi->dma;
	int time_left;
	int i;

	for (i = 0; i < dspi->words_in_flight; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dspi->words_in_flight *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dspi->words_in_flight *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	if (spi_controller_is_target(dspi->ctlr)) {
		wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
		return 0;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void dspi_setup_accel(struct fsl_dspi *dspi);

static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct spi_message *message = dspi->cur_msg;
	struct device *dev = &dspi->pdev->dev;
	int ret = 0;

	/*
	 * dspi->len gets decremented by dspi_pop_tx_pushr in
	 * dspi_next_xfer_dma_submit
	 */
	while (dspi->len) {
		/* Figure out operational bits-per-word for this chunk */
		dspi_setup_accel(dspi);

		dspi->words_in_flight = dspi->len / dspi->oper_word_size;
		if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
			dspi->words_in_flight = dspi->devtype_data->fifo_size;

		message->actual_length += dspi->words_in_flight *
					  dspi->oper_word_size;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			break;
		}
	}

	return ret;
}

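/*
 * Allocate the coherent TX/RX bounce buffers and configure one DMA
 * channel per direction: both use 32-bit single-word accesses, with
 * SPI_PUSHR as the TX destination and SPI_POPR as the RX source.
 */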
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	int dma_bufsize = dspi->devtype_data->fifo_size * 2;
	struct device *dev = &dspi->pdev->dev;
	struct dma_slave_config cfg;
	struct fsl_dspi_dma *dma;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->chan_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->chan_rx))
		return dev_err_probe(dev, PTR_ERR(dma->chan_rx), "rx dma channel not available\n");

	dma->chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = dev_err_probe(dev, PTR_ERR(dma->chan_tx), "tx dma channel not available\n");
		goto err_tx_channel;
	}

	dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
					     dma_bufsize, &dma->tx_dma_phys,
					     GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
					     dma_bufsize, &dma->rx_dma_phys,
					     GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err_probe(dev, ret, "can't configure rx dma channel\n");
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err_probe(dev, ret, "can't configure tx dma channel\n");
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

err_slave_config:
	dma_free_coherent(dma->chan_rx->device->dev,
			  dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dma->chan_tx->device->dev,
			  dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}

static void dspi_release_dma(struct fsl_dspi *dspi)
{
	int dma_bufsize = dspi->devtype_data->fifo_size * 2;
	struct fsl_dspi_dma *dma = dspi->dma;

	if (!dma)
		return;

	if (dma->chan_tx) {
		dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
				  dma->tx_dma_buf, dma->tx_dma_phys);
		dma_release_channel(dma->chan_tx);
	}

	if (dma->chan_rx) {
		dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
				  dma->rx_dma_buf, dma->rx_dma_phys);
		dma_release_channel(dma->chan_rx);
	}
}

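/*
 * Pick the smallest combination of baud rate prescaler (PBR) and scaler
 * (BR) that still divides the protocol clock down to at most speed_hz:
 * the resulting SCK rate is clkrate / (PBR * BR), doubled again by the
 * DBR bit in MTF mode. For example, with clkrate = 100 MHz and
 * speed_hz = 10 MHz, scale_needed is 10, and PBR = 5 with BR = 2 is an
 * exact match (100 MHz / (5 * 2) = 10 MHz).
 */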
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
			   unsigned long clkrate, bool mtf_enabled)
{
	/* Valid baud rate pre-scaler values */
	int pbr_tbl[4] = {2, 3, 5, 7};
	int brs[16] = {	2,	4,	6,	8,
			16,	32,	64,	128,
			256,	512,	1024,	2048,
			4096,	8192,	16384,	32768 };
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;

	scale_needed = clkrate / speed_hz;
	if (clkrate % speed_hz)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(brs); i++)
		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
			if (mtf_enabled) {
				/* In MTF mode DBR=1, so the frequency is doubled */
				scale = (brs[i] * pbr_tbl[j]) / 2;
			} else {
				scale = brs[i] * pbr_tbl[j];
			}

			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*br = i;
					*pbr = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find valid baud rate: speed_hz is %d, clkrate is %ld, using max prescaler value\n",
			speed_hz, clkrate);
		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
		*br = ARRAY_SIZE(brs) - 1;
	}
}

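/*
 * Counterpart of hz_to_spi_baud() for the CS-to-SCK and after-SCK
 * delays: find the smallest prescaler/scaler pair whose delay,
 * pscale * 2^(sc + 1) clock cycles, covers at least the requested
 * delay_ns at the given clkrate.
 */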
static void ns_delay_scale(char *psc, char *sc, int delay_ns,
			   unsigned long clkrate)
{
	int scale_needed, scale, minscale = INT_MAX;
	int pscale_tbl[4] = {1, 3, 5, 7};
	u32 remainder;
	int i, j;

	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
				   &remainder);
	if (remainder)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
			scale = pscale_tbl[i] * (2 << j);
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*psc = i;
					*sc = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value\n",
			delay_ns, clkrate);
		*psc = ARRAY_SIZE(pscale_tbl) - 1;
		*sc = SPI_CTAR_SCALE_BITS;
	}
}

static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
	/*
	 * The only time when the PCS doesn't need continuation after this word
	 * is when it's last. We need to look ahead, because we actually call
	 * dspi_pop_tx (the function that decrements dspi->len) _after_
	 * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
	 * word is enough. If there's more to transmit than that, the command
	 * gets CONT; and when this burst also ends the buffer,
	 * dspi_xspi_fifo_write has already tagged the command with EOQ, so
	 * PCS still de-asserts after the final word.
	 */
	if (dspi->len > dspi->oper_word_size)
		cmd |= SPI_PUSHR_CMD_CONT;
	regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
}

static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
{
	regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
}

static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
{
	int num_bytes = num_words * dspi->oper_word_size;
	u16 tx_cmd = dspi->tx_cmd;

	/*
	 * If the PCS needs to de-assert (i.e. we're at the end of the buffer
	 * and cs_change does not want the PCS to stay on), mark the command
	 * with EOQ. The command for the body of the buffer necessarily has
	 * the CONT bit set, and EOQ flags it as the last one of the queue,
	 * so that the frame can end (and PCS de-assert) after the final word.
	 */
	if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
		tx_cmd |= SPI_PUSHR_CMD_EOQ;

	/* Update CTARE */
	regmap_write(dspi->regmap, SPI_CTARE(0),
		     SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
		     SPI_CTARE_DTCP(num_words));

	/*
	 * Write the CMD FIFO entry first, and then the two
	 * corresponding TX FIFO entries (or one...).
	 */
	dspi_pushr_cmd_write(dspi, tx_cmd);

	/* Fill TX FIFO with as many transfers as possible */
	while (num_words--) {
		u32 data = dspi_pop_tx(dspi);

		dspi_pushr_txdata_write(dspi, data & 0xFFFF);
		if (dspi->oper_bits_per_word > 16)
			dspi_pushr_txdata_write(dspi, data >> 16);
	}
}

static u32 dspi_popr_read(struct fsl_dspi *dspi)
{
	u32 rxdata = 0;

	regmap_read(dspi->regmap, SPI_POPR, &rxdata);
	return rxdata;
}

static void dspi_fifo_read(struct fsl_dspi *dspi)
{
	int num_fifo_entries = dspi->words_in_flight;

	/* Read one FIFO entry and push to rx buffer */
	while (num_fifo_entries--)
		dspi_push_rx(dspi, dspi_popr_read(dspi));
}

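/*
 * Choose the operational word size for the current chunk of the
 * transfer: whenever possible, 8-bit and 16-bit software frames are
 * coalesced into 16-bit or 32-bit hardware words (see the conversion
 * helpers above), which reduces the number of FIFO accesses and
 * interrupts, while short or odd-sized remainders fall back to
 * narrower words.
 */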
static void dspi_setup_accel(struct fsl_dspi *dspi)
{
	struct spi_transfer *xfer = dspi->cur_transfer;
	bool odd = !!(dspi->len & 1);

	/*
	 * No accel for DMA transfers or frames not multiples of 8 bits at the
	 * moment.
	 */
	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE ||
	    xfer->bits_per_word % 8)
		goto no_accel;

	if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
		dspi->oper_bits_per_word = 16;
	} else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
		dspi->oper_bits_per_word = 8;
	} else {
		/* Start off with maximum supported by hardware */
		dspi->oper_bits_per_word = 32;

		/*
		 * And go down only if the buffer can't be sent with
		 * words this big
		 */
		do {
			if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
				break;

			dspi->oper_bits_per_word /= 2;
		} while (dspi->oper_bits_per_word > 8);
	}

	if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_8on32_dev_to_host;
		dspi->host_to_dev = dspi_8on32_host_to_dev;
	} else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
		dspi->dev_to_host = dspi_8on16_dev_to_host;
		dspi->host_to_dev = dspi_8on16_host_to_dev;
	} else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_16on32_dev_to_host;
		dspi->host_to_dev = dspi_16on32_host_to_dev;
	} else {
no_accel:
		dspi->dev_to_host = dspi_native_dev_to_host;
		dspi->host_to_dev = dspi_native_host_to_dev;
		dspi->oper_bits_per_word = xfer->bits_per_word;
	}

	dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);

	/*
	 * Update CTAR here (code is common for XSPI and DMA modes).
	 * We will update CTARE in the portion specific to XSPI, when we
	 * also know the preload value (DTCP).
	 */
	regmap_write(dspi->regmap, SPI_CTAR(0),
		     dspi->cur_chip->ctar_val |
		     SPI_FRAME_BITS(dspi->oper_bits_per_word));
}

static void dspi_fifo_write(struct fsl_dspi *dspi)
{
	int num_fifo_entries = dspi->devtype_data->fifo_size;
	struct spi_transfer *xfer = dspi->cur_transfer;
	struct spi_message *msg = dspi->cur_msg;
	int num_words, num_bytes;

	dspi_setup_accel(dspi);

	/* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
	if (dspi->oper_word_size == 4)
		num_fifo_entries /= 2;

	/*
	 * Integer division intentionally trims off odd (or non-multiple of 4)
	 * numbers of bytes at the end of the buffer, which will be sent next
	 * time using a smaller oper_word_size.
	 */
	num_words = dspi->len / dspi->oper_word_size;
	if (num_words > num_fifo_entries)
		num_words = num_fifo_entries;

	/* Update total number of bytes that were transferred */
	num_bytes = num_words * dspi->oper_word_size;
	msg->actual_length += num_bytes;
	dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);

	/*
	 * Update shared variable for use in the next interrupt (both in
	 * dspi_fifo_read and in dspi_fifo_write).
	 */
	dspi->words_in_flight = num_words;

	spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);

	dspi_xspi_fifo_write(dspi, num_words);
	/*
	 * Everything after this point is in a potential race with the next
	 * interrupt, so we must never use dspi->words_in_flight again since it
	 * might already be modified by the next dspi_fifo_write.
	 */

	spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
				dspi->progress, !dspi->irq);
}

static int dspi_rxtx(struct fsl_dspi *dspi)
{
	dspi_fifo_read(dspi);

	if (!dspi->len)
		/* Success! */
		return 0;

	dspi_fifo_write(dspi);

	return -EINPROGRESS;
}

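/*
 * Polled equivalent of the CMDTCF interrupt: busy-wait (bounded to 1000
 * status register reads) for the command transfer complete flag, then
 * advance the transfer via dspi_rxtx().
 */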
static int dspi_poll(struct fsl_dspi *dspi)
{
	int tries = 1000;
	u32 spi_sr;

	do {
		regmap_read(dspi->regmap, SPI_SR, &spi_sr);
		regmap_write(dspi->regmap, SPI_SR, spi_sr);

		if (spi_sr & SPI_SR_CMDTCF)
			break;
	} while (--tries);

	if (!tries)
		return -ETIMEDOUT;

	return dspi_rxtx(dspi);
}

static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	u32 spi_sr;

	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);

	if (!(spi_sr & SPI_SR_CMDTCF))
		return IRQ_NONE;

	if (dspi_rxtx(dspi) == 0)
		complete(&dspi->xfer_done);

	return IRQ_HANDLED;
}

static void dspi_assert_cs(struct spi_device *spi, bool *cs)
{
	if (!spi_get_csgpiod(spi, 0) || *cs)
		return;

	gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), true);
	*cs = true;
}

static void dspi_deassert_cs(struct spi_device *spi, bool *cs)
{
	if (!spi_get_csgpiod(spi, 0) || !*cs)
		return;

	gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), false);
	*cs = false;
}

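/*
 * Main transfer path: take the module out of HALT, then for each
 * spi_transfer build the PUSHR command word (CTAS, PCS and CONT
 * according to the cs_change rules below), run the transfer via DMA,
 * interrupt or polling, and finally halt the module again unless
 * cs_change asked for the chip select to stay asserted.
 */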
static int dspi_transfer_one_message(struct spi_controller *ctlr,
				     struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	bool cs = false;
	int status = 0;
	u32 val = 0;
	bool cs_change = false;

	message->actual_length = 0;

	/* Put DSPI in running mode if halted. */
	regmap_read(dspi->regmap, SPI_MCR, &val);
	if (val & SPI_MCR_HALT) {
		regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0);
		while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
		       !(val & SPI_SR_TXRXS))
			;
	}

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);

		dspi_assert_cs(spi, &cs);

		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0);
		if (!spi_get_csgpiod(spi, 0))
			dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi_get_chipselect(spi, 0));

		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		cs_change = transfer->cs_change;
		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->len = transfer->len;
		dspi->progress = 0;

		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

		regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);

		spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
				       dspi->progress, !dspi->irq);

		if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
			status = dspi_dma_xfer(dspi);
		} else {
			/*
			 * Reinitialize the completion before transferring data
			 * to avoid the case where it might remain in the done
			 * state due to a spurious interrupt from a previous
			 * transfer. This could falsely signal that the current
			 * transfer has completed.
			 */
			if (dspi->irq)
				reinit_completion(&dspi->xfer_done);

			dspi_fifo_write(dspi);

			if (dspi->irq) {
				wait_for_completion(&dspi->xfer_done);
			} else {
				do {
					status = dspi_poll(dspi);
				} while (status == -EINPROGRESS);
			}
		}
		if (status)
			break;

		spi_transfer_delay_exec(transfer);

		if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT))
			dspi_deassert_cs(spi, &cs);
	}

	if (status || !cs_change) {
		/* Put DSPI in stop mode */
		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_HALT, SPI_MCR_HALT);
		while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
		       val & SPI_SR_TXRXS)
			;
	}

	message->status = status;
	spi_finalize_current_message(ctlr);

	return status;
}

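/*
 * Propagate the modified transfer format (MTF) decision taken in
 * dspi_setup() to SPI_MCR. MTF is used on S32G above SPI_25MHZ,
 * together with the CTAR double baud rate bit; it is host-mode only.
 */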
static int dspi_set_mtf(struct fsl_dspi *dspi)
{
	if (spi_controller_is_target(dspi->ctlr))
		return 0;

	if (dspi->mtf_enabled)
		regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_MTFE,
				   SPI_MCR_MTFE);
	else
		regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_MTFE, 0);

	return 0;
}

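/*
 * Per peripheral setup: translate spi->max_speed_hz and the CS setup /
 * hold delays (from spi_delay parameters, device tree properties or
 * platform data) into the CTAR baud rate and delay scaler fields that
 * are cached in chip_data for use by every subsequent transfer.
 */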
static int dspi_setup(struct spi_device *spi)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
	u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	struct fsl_dspi_platform_data *pdata;
	unsigned char pasc = 0, asc = 0;
	struct gpio_desc *gpio_cs;
	struct chip_data *chip;
	unsigned long clkrate;
	bool cs = true;
	int val;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	pdata = dev_get_platdata(&dspi->pdev->dev);

	if (!pdata) {
		val = spi_delay_to_ns(&spi->cs_setup, NULL);
		cs_sck_delay = val >= 0 ? val : 0;
		if (!cs_sck_delay)
			of_property_read_u32(spi->dev.of_node,
					     "fsl,spi-cs-sck-delay",
					     &cs_sck_delay);

		val = spi_delay_to_ns(&spi->cs_hold, NULL);
		sck_cs_delay = val >= 0 ? val : 0;
		if (!sck_cs_delay)
			of_property_read_u32(spi->dev.of_node,
					     "fsl,spi-sck-cs-delay",
					     &sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	/*
	 * Since tCSC and tASC apply to continuous transfers too, avoid SCK
	 * glitches of half a cycle by never allowing tCSC + tASC to go below
	 * half a SCK period.
	 */
	if (cs_sck_delay < quarter_period_ns)
		cs_sck_delay = quarter_period_ns;
	if (sck_cs_delay < quarter_period_ns)
		sck_cs_delay = quarter_period_ns;

	dev_dbg(&spi->dev,
		"DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
		cs_sck_delay, sck_cs_delay);

	clkrate = clk_get_rate(dspi->clk);

	if (is_s32g_dspi(dspi) && spi->max_speed_hz > SPI_25MHZ)
		dspi->mtf_enabled = true;
	else
		dspi->mtf_enabled = false;

	dspi_set_mtf(dspi);

	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate,
		       dspi->mtf_enabled);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = 0;
	if (spi->mode & SPI_CPOL)
		chip->ctar_val |= SPI_CTAR_CPOL;
	if (spi->mode & SPI_CPHA)
		chip->ctar_val |= SPI_CTAR_CPHA;

	if (!spi_controller_is_target(dspi->ctlr)) {
		chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
				  SPI_CTAR_CSSCK(cssck) |
				  SPI_CTAR_PASC(pasc) |
				  SPI_CTAR_ASC(asc) |
				  SPI_CTAR_PBR(pbr) |
				  SPI_CTAR_BR(br);

		if (dspi->mtf_enabled)
			chip->ctar_val |= SPI_CTAR_DBR;

		if (spi->mode & SPI_LSB_FIRST)
			chip->ctar_val |= SPI_CTAR_LSBFE;
	}

	gpio_cs = spi_get_csgpiod(spi, 0);
	if (gpio_cs)
		gpiod_direction_output(gpio_cs, false);

	dspi_deassert_cs(spi, &cs);

	spi_set_ctldata(spi, chip);

	return 0;
}

static void dspi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
		spi->controller->bus_num, spi_get_chipselect(spi, 0));

	kfree(chip);
}

static const struct of_device_id fsl_dspi_dt_ids[] = {
	{
		.compatible = "fsl,vf610-dspi",
		.data = &devtype_data[VF610],
	}, {
		.compatible = "fsl,ls1021a-v1.0-dspi",
		.data = &devtype_data[LS1021A],
	}, {
		.compatible = "fsl,ls1012a-dspi",
		.data = &devtype_data[LS1012A],
	}, {
		.compatible = "fsl,ls1028a-dspi",
		.data = &devtype_data[LS1028A],
	}, {
		.compatible = "fsl,ls1043a-dspi",
		.data = &devtype_data[LS1043A],
	}, {
		.compatible = "fsl,ls1046a-dspi",
		.data = &devtype_data[LS1046A],
	}, {
		.compatible = "fsl,ls2080a-dspi",
		.data = &devtype_data[LS2080A],
	}, {
		.compatible = "fsl,ls2085a-dspi",
		.data = &devtype_data[LS2085A],
	}, {
		.compatible = "fsl,lx2160a-dspi",
		.data = &devtype_data[LX2160A],
	}, {
		.compatible = "nxp,s32g2-dspi",
		.data = &devtype_data[S32G],
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);

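/*
 * One-time controller initialization: idle all chip selects high,
 * select the host/target role and XSPI mode, and route completion
 * events either to the CMDTCF interrupt (XSPI mode) or to the TX/RX
 * FIFO DMA requests (DMA mode). The module is left halted until the
 * first message arrives.
 */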
static int dspi_init(struct fsl_dspi *dspi)
{
	unsigned int mcr;

	/* Set idle states for all chip select signals to high */
	mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0));

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		mcr |= SPI_MCR_XSPI;
	if (!spi_controller_is_target(dspi->ctlr))
		mcr |= SPI_MCR_HOST;

	mcr |= SPI_MCR_HALT;

	regmap_write(dspi->regmap, SPI_MCR, mcr);
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);

	switch (dspi->devtype_data->trans_mode) {
	case DSPI_XSPI_MODE:
		regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
		break;
	case DSPI_DMA_MODE:
		regmap_write(dspi->regmap, SPI_RSER,
			     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
			     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
		break;
	default:
		dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
			dspi->devtype_data->trans_mode);
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
	struct fsl_dspi *dspi = dev_get_drvdata(dev);

	if (dspi->irq)
		disable_irq(dspi->irq);
	spi_controller_suspend(dspi->ctlr);
	clk_disable_unprepare(dspi->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dspi_resume(struct device *dev)
{
	struct fsl_dspi *dspi = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;
	spi_controller_resume(dspi->ctlr);

	ret = dspi_init(dspi);
	if (ret) {
		dev_err(dev, "failed to initialize dspi during resume\n");
		return ret;
	}

	dspi_set_mtf(dspi);

	if (dspi->irq)
		enable_irq(dspi->irq);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);

static int dspi_target_abort(struct spi_controller *host)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(host);

	/*
	 * Terminate all pending DMA transactions for the SPI working
	 * in TARGET mode.
	 */
	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		dmaengine_terminate_sync(dspi->dma->chan_rx);
		dmaengine_terminate_sync(dspi->dma->chan_tx);
	}

	/* Clear the internal DSPI RX and TX FIFO buffers */
	regmap_update_bits(dspi->regmap, SPI_MCR,
			   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
			   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

	return 0;
}

static int dspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_dspi_platform_data *pdata;
	struct spi_controller *ctlr;
	int ret, cs_num, bus_num = -1;
	struct fsl_dspi *dspi;
	struct resource *res;
	void __iomem *base;
	bool big_endian;

	dspi = devm_kzalloc(&pdev->dev, sizeof(*dspi), GFP_KERNEL);
	if (!dspi)
		return -ENOMEM;

	if (of_property_read_bool(np, "spi-slave"))
		ctlr = spi_alloc_target(&pdev->dev, 0);
	else
		ctlr = spi_alloc_host(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	spi_controller_set_devdata(ctlr, dspi);
	platform_set_drvdata(pdev, dspi);

	dspi->pdev = pdev;
	dspi->ctlr = ctlr;

	ctlr->setup = dspi_setup;
	ctlr->transfer_one_message = dspi_transfer_one_message;
	ctlr->dev.of_node = pdev->dev.of_node;

	ctlr->cleanup = dspi_cleanup;
	ctlr->target_abort = dspi_target_abort;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	ctlr->use_gpio_descriptors = true;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num;
		ctlr->bus_num = pdata->bus_num;

		/* Only Coldfire uses platform data */
		dspi->devtype_data = &devtype_data[MCF5441X];
		big_endian = true;
	} else {
		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
			goto out_ctlr_put;
		}
		ctlr->num_chipselect = ctlr->max_native_cs = cs_num;

		of_property_read_u32(np, "bus-num", &bus_num);
		ctlr->bus_num = bus_num;

		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
		if (!dspi->devtype_data) {
			dev_err(&pdev->dev, "can't get devtype_data\n");
			ret = -EFAULT;
			goto out_ctlr_put;
		}

		big_endian = of_device_is_big_endian(np);
	}
	if (big_endian) {
		dspi->pushr_cmd = 0;
		dspi->pushr_tx = 2;
	} else {
		dspi->pushr_cmd = 2;
		dspi->pushr_tx = 0;
	}

	if (spi_controller_is_target(ctlr) && is_s32g_dspi(dspi))
		dspi->devtype_data = &devtype_data[S32G_TARGET];

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out_ctlr_put;
	}

	dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base,
					     dspi->devtype_data->regmap);
	if (IS_ERR(dspi->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
			PTR_ERR(dspi->regmap));
		ret = PTR_ERR(dspi->regmap);
		goto out_ctlr_put;
	}

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
		dspi->regmap_pushr = devm_regmap_init_mmio(
			&pdev->dev, base + SPI_PUSHR,
			&dspi_regmap_config[DSPI_PUSHR]);
		if (IS_ERR(dspi->regmap_pushr)) {
			dev_err(&pdev->dev,
				"failed to init pushr regmap: %ld\n",
				PTR_ERR(dspi->regmap_pushr));
			ret = PTR_ERR(dspi->regmap_pushr);
			goto out_ctlr_put;
		}
	}

	dspi->clk = devm_clk_get_enabled(&pdev->dev, "dspi");
	if (IS_ERR(dspi->clk)) {
		ret = PTR_ERR(dspi->clk);
		dev_err(&pdev->dev, "unable to get clock\n");
		goto out_ctlr_put;
	}

	ret = dspi_init(dspi);
	if (ret)
		goto out_ctlr_put;

	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq <= 0) {
		dev_info(&pdev->dev,
			 "can't get platform irq, using poll mode\n");
		dspi->irq = 0;
		goto poll_mode;
	}

	init_completion(&dspi->xfer_done);

	ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
				   IRQF_SHARED, pdev->name, dspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
		goto out_ctlr_put;
	}

poll_mode:

	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		ret = dspi_request_dma(dspi, res->start);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get dma channels\n");
			goto out_free_irq;
		}
	}

	ctlr->max_speed_hz =
		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

	if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
		ctlr->ptp_sts_supported = true;

	ret = spi_register_controller(ctlr);
	if (ret != 0) {
		dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
		goto out_release_dma;
	}

	return ret;

out_release_dma:
	dspi_release_dma(dspi);
out_free_irq:
	if (dspi->irq)
		free_irq(dspi->irq, dspi);
out_ctlr_put:
	spi_controller_put(ctlr);

	return ret;
}

static void dspi_remove(struct platform_device *pdev)
{
	struct fsl_dspi *dspi = platform_get_drvdata(pdev);

	/* Disconnect from the SPI framework */
	spi_unregister_controller(dspi->ctlr);

	/* Disable RX and TX */
	regmap_update_bits(dspi->regmap, SPI_MCR,
			   SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
			   SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);

	/* Stop Running */
	regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);

	dspi_release_dma(dspi);
	if (dspi->irq)
		free_irq(dspi->irq, dspi);
}

static void dspi_shutdown(struct platform_device *pdev)
{
	dspi_remove(pdev);
}

static struct platform_driver fsl_dspi_driver = {
	.driver.name = DRIVER_NAME,
	.driver.of_match_table = fsl_dspi_dt_ids,
	.driver.pm = &dspi_pm,
	.probe = dspi_probe,
	.remove = dspi_remove,
	.shutdown = dspi_shutdown,
};
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);