// SPDX-License-Identifier: GPL-2.0+
//
// Freescale i.MX7ULP LPSPI driver
//
// Copyright 2016 Freescale Semiconductor, Inc.
// Copyright 2018 NXP Semiconductors

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma/imx-dma.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>

#define DRIVER_NAME "fsl_lpspi"

#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */

/* The maximum number of bytes that eDMA can transfer at once. */
#define FSL_LPSPI_MAX_EDMA_BYTES ((1 << 15) - 1)

/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID	0x0
#define IMX7ULP_PARAM	0x4
#define IMX7ULP_CR	0x10
#define IMX7ULP_SR	0x14
#define IMX7ULP_IER	0x18
#define IMX7ULP_DER	0x1c
#define IMX7ULP_CFGR0	0x20
#define IMX7ULP_CFGR1	0x24
#define IMX7ULP_DMR0	0x30
#define IMX7ULP_DMR1	0x34
#define IMX7ULP_CCR	0x40
#define IMX7ULP_FCR	0x58
#define IMX7ULP_FSR	0x5c
#define IMX7ULP_TCR	0x60
#define IMX7ULP_TDR	0x64
#define IMX7ULP_RSR	0x70
#define IMX7ULP_RDR	0x74

/* General control register field definitions */
#define CR_RRF		BIT(9)
#define CR_RTF		BIT(8)
#define CR_RST		BIT(1)
#define CR_MEN		BIT(0)
#define SR_MBF		BIT(24)
#define SR_TCF		BIT(10)
#define SR_FCF		BIT(9)
#define SR_RDF		BIT(1)
#define SR_TDF		BIT(0)
#define IER_TCIE	BIT(10)
#define IER_FCIE	BIT(9)
#define IER_RDIE	BIT(1)
#define IER_TDIE	BIT(0)
#define DER_RDDE	BIT(1)
#define DER_TDDE	BIT(0)
#define CFGR1_PCSCFG	BIT(27)
#define CFGR1_PINCFG	(BIT(24)|BIT(25))
#define CFGR1_PCSPOL	BIT(8)
#define CFGR1_NOSTALL	BIT(3)
#define CFGR1_HOST	BIT(0)
#define FSR_TXCOUNT	(0xFF)
#define RSR_RXEMPTY	BIT(1)
#define TCR_CPOL	BIT(31)
#define TCR_CPHA	BIT(30)
#define TCR_CONT	BIT(21)
#define TCR_CONTC	BIT(20)
#define TCR_RXMSK	BIT(19)
#define TCR_TXMSK	BIT(18)

struct fsl_lpspi_devtype_data {
	u8 prescale_max;
};

struct lpspi_config {
	u8 bpw;
	u8 chip_select;
	u8 prescale;
	u16 mode;
	u32 speed_hz;
	u32 effective_speed_hz;
};

struct fsl_lpspi_data {
	struct device *dev;
	void __iomem *base;
	unsigned long base_phys;
	struct clk *clk_ipg;
	struct clk *clk_per;
	bool is_target;
	bool is_only_cs1;
	bool is_first_byte;

	void *rx_buf;
	const void *tx_buf;
	void (*tx)(struct fsl_lpspi_data *);
	void (*rx)(struct fsl_lpspi_data *);

	u32 remain;
	u8 watermark;
	u8 txfifosize;
	u8 rxfifosize;

	struct lpspi_config config;
	struct completion xfer_done;

	bool target_aborted;

	/* DMA */
	bool usedma;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct fsl_lpspi_devtype_data *devtype_data;
};

/*
 * ERR051608: whether the erratum is fixed on the SoC determines prescale_max,
 * see: https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
 */
static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
	.prescale_max = 1,
};

static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
	.prescale_max = 7,
};

static const struct of_device_id fsl_lpspi_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
	{ .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);

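/*
 * Generate per-width FIFO accessors (u8/u16/u32): each RX helper pops one
 * word from RDR into rx_buf when a buffer is present, and each TX helper
 * pushes the next word (or a dummy 0) into TDR and updates the remaining
 * byte count.
 */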
#define LPSPI_BUF_RX(type)						\
static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR);	\
									\
	if (fsl_lpspi->rx_buf) {					\
		*(type *)fsl_lpspi->rx_buf = val;			\
		fsl_lpspi->rx_buf += sizeof(type);			\
	}								\
}

#define LPSPI_BUF_TX(type)						\
static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	type val = 0;							\
									\
	if (fsl_lpspi->tx_buf) {					\
		val = *(type *)fsl_lpspi->tx_buf;			\
		fsl_lpspi->tx_buf += sizeof(type);			\
	}								\
									\
	fsl_lpspi->remain -= sizeof(type);				\
	writel(val, fsl_lpspi->base + IMX7ULP_TDR);			\
}

LPSPI_BUF_RX(u8)
LPSPI_BUF_TX(u8)
LPSPI_BUF_RX(u16)
LPSPI_BUF_TX(u16)
LPSPI_BUF_RX(u32)
LPSPI_BUF_TX(u32)

static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
			      unsigned int enable)
{
	writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}

static int fsl_lpspi_bytes_per_word(const int bpw)
{
	return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
}

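/*
 * DMA is only attempted when an RX channel was successfully requested and
 * the transfer's word size maps onto a 1, 2 or 4 byte DMA bus width.
 */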
static bool fsl_lpspi_can_dma(struct spi_controller *controller,
			      struct spi_device *spi,
			      struct spi_transfer *transfer)
{
	unsigned int bytes_per_word;

	if (!controller->dma_rx)
		return false;

	bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);

	switch (bytes_per_word) {
	case 1:
	case 2:
	case 4:
		break;
	default:
		return false;
	}

	return true;
}

static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		return ret;
	}

	return 0;
}

static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	pm_runtime_mark_last_busy(fsl_lpspi->dev);
	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;
}

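/*
 * Fill the TX FIFO with as many words as fit. If all remaining data fits,
 * clear TCR_CONTC in host mode so the chip select can be released once the
 * frame completes, and wait for the frame-complete interrupt; otherwise
 * re-arm the TX-data interrupt to refill the FIFO later.
 */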
static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	u8 txfifo_cnt;
	u32 temp;

	txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;

	while (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->remain)
			break;
		fsl_lpspi->tx(fsl_lpspi);
		txfifo_cnt++;
	}

	if (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->is_target) {
			temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
			temp &= ~TCR_CONTC;
			writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
		}

		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
	} else
		fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
}

static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
		fsl_lpspi->rx(fsl_lpspi);
}

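/*
 * Build the Transmit Command Register for the current transfer: frame size,
 * SPI mode (CPOL/CPHA), chip select and, in host mode, the prescaler plus
 * the continuous-transfer control bits.
 */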
static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp = 0;

	temp |= fsl_lpspi->config.bpw - 1;
	temp |= (fsl_lpspi->config.mode & 0x3) << 30;
	temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
	if (!fsl_lpspi->is_target) {
		temp |= fsl_lpspi->config.prescale << 27;
		/*
		 * Setting TCR_CONT keeps SS asserted after the current
		 * transfer. For the first transfer, clear TCR_CONTC to
		 * assert SS; for subsequent transfers, set TCR_CONTC to
		 * keep SS asserted.
		 */
		if (!fsl_lpspi->usedma) {
			temp |= TCR_CONT;
			if (fsl_lpspi->is_first_byte)
				temp &= ~TCR_CONTC;
			else
				temp |= TCR_CONTC;
		}
	}
	writel(temp, fsl_lpspi->base + IMX7ULP_TCR);

	dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
}

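/*
 * Program the FIFO watermarks in FCR: the TX watermark goes in the low half
 * of the register and the RX watermark in the upper half. PIO mode sets
 * both to half of the chosen watermark; DMA mode only needs the TX
 * watermark.
 */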
static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

	if (!fsl_lpspi->usedma)
		temp = fsl_lpspi->watermark >> 1 |
		       (fsl_lpspi->watermark >> 1) << 16;
	else
		temp = fsl_lpspi->watermark >> 1;

	writel(temp, fsl_lpspi->base + IMX7ULP_FCR);

	dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
}

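/*
 * The SCK rate implied by the divider search below is
 *   SCK = perclk / (2^prescale * (scldiv + 2)),
 * so the loop picks the smallest prescaler for which an 8-bit scldiv exists.
 * For example, with perclk = 24 MHz and a requested 10 MHz, div = 3,
 * prescale = 0 and scldiv = 1, giving an effective rate of 8 MHz.
 */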
static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
{
	struct lpspi_config config = fsl_lpspi->config;
	unsigned int perclk_rate, div;
	u8 prescale_max;
	u8 prescale;
	int scldiv;

	perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
	prescale_max = fsl_lpspi->devtype_data->prescale_max;

	if (!config.speed_hz) {
		dev_err(fsl_lpspi->dev,
			"error: the transmission speed provided is 0!\n");
		return -EINVAL;
	}

	if (config.speed_hz > perclk_rate / 2) {
		dev_err(fsl_lpspi->dev,
			"per-clk should be at least twice the transfer speed\n");
		return -EINVAL;
	}

	div = DIV_ROUND_UP(perclk_rate, config.speed_hz);

	for (prescale = 0; prescale <= prescale_max; prescale++) {
		scldiv = div / (1 << prescale) - 2;
		if (scldiv >= 0 && scldiv < 256) {
			fsl_lpspi->config.prescale = prescale;
			break;
		}
	}

	if (scldiv < 0 || scldiv >= 256)
		return -EINVAL;

	writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
	       fsl_lpspi->base + IMX7ULP_CCR);

	fsl_lpspi->config.effective_speed_hz = perclk_rate /
					((scldiv + 2) * (1 << prescale));

	dev_dbg(fsl_lpspi->dev, "perclk=%u, speed=%u, prescale=%u, scldiv=%d\n",
		perclk_rate, config.speed_hz, prescale, scldiv);

	return 0;
}

static int fsl_lpspi_dma_configure(struct spi_controller *controller)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = 1;
	ret = dmaengine_slave_config(controller->dma_tx, &tx);
	if (ret) {
		dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
			ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = 1;
	ret = dmaengine_slave_config(controller->dma_rx, &rx);
	if (ret) {
		dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
			ret);
		return ret;
	}

	return 0;
}

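/*
 * Per-transfer controller configuration: bit rate (host mode only), FIFO
 * watermarks, pin/polarity setup in CFGR1, module enable with FIFO reset,
 * and the TX/RX DMA request enables when DMA is used.
 */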
static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;
	int ret;

	if (!fsl_lpspi->is_target) {
		ret = fsl_lpspi_set_bitrate(fsl_lpspi);
		if (ret)
			return ret;
	}

	fsl_lpspi_set_watermark(fsl_lpspi);

	if (!fsl_lpspi->is_target)
		temp = CFGR1_HOST;
	else
		temp = CFGR1_PINCFG;
	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
		temp |= CFGR1_PCSPOL;
	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);

	temp = readl(fsl_lpspi->base + IMX7ULP_CR);
	temp |= CR_RRF | CR_RTF | CR_MEN;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	temp = 0;
	if (fsl_lpspi->usedma)
		temp = DER_TDDE | DER_RDDE;
	writel(temp, fsl_lpspi->base + IMX7ULP_DER);

	return 0;
}

static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
				    struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(spi->controller);

	if (t == NULL)
		return -EINVAL;

	fsl_lpspi->config.mode = spi->mode;
	fsl_lpspi->config.bpw = t->bits_per_word;
	fsl_lpspi->config.speed_hz = t->speed_hz;
	if (fsl_lpspi->is_only_cs1)
		fsl_lpspi->config.chip_select = 1;
	else
		fsl_lpspi->config.chip_select = spi_get_chipselect(spi, 0);

	if (!fsl_lpspi->config.speed_hz)
		fsl_lpspi->config.speed_hz = spi->max_speed_hz;
	if (!fsl_lpspi->config.bpw)
		fsl_lpspi->config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (fsl_lpspi->config.bpw <= 8) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
	} else if (fsl_lpspi->config.bpw <= 16) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
	} else {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
	}

	if (t->len <= fsl_lpspi->txfifosize)
		fsl_lpspi->watermark = t->len;
	else
		fsl_lpspi->watermark = fsl_lpspi->txfifosize;

	if (fsl_lpspi_can_dma(controller, spi, t))
		fsl_lpspi->usedma = true;
	else
		fsl_lpspi->usedma = false;

	return fsl_lpspi_config(fsl_lpspi);
}

static int fsl_lpspi_target_abort(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi->target_aborted = true;
	if (!fsl_lpspi->usedma)
		complete(&fsl_lpspi->xfer_done);
	else {
		complete(&fsl_lpspi->dma_tx_completion);
		complete(&fsl_lpspi->dma_rx_completion);
	}

	return 0;
}

static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	if (fsl_lpspi->is_target) {
		if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev, "interrupted\n");
			return -EINTR;
		}
	} else {
		if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
			dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

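/*
 * Quiesce the controller between transfers: mask interrupts in PIO mode,
 * clear all write-1-to-clear status flags, and reset the FIFOs while
 * leaving the module disabled.
 */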
static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

	if (!fsl_lpspi->usedma) {
		/* Disable all interrupts */
		fsl_lpspi_intctrl(fsl_lpspi, 0);
	}

	/* W1C for all flags in SR */
	temp = 0x3F << 8;
	writel(temp, fsl_lpspi->base + IMX7ULP_SR);

	/* Clear FIFO and disable module */
	temp = CR_RRF | CR_RTF;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	return 0;
}

static void fsl_lpspi_dma_rx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_rx_completion);
}

static void fsl_lpspi_dma_tx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_tx_completion);
}

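/*
 * Example: a 64 KiB transfer at 1 MHz gives (8 + 4) * 65536 / 1000000 = 0
 * whole seconds of bus time, plus one second of slack, doubled below to a
 * 2 second DMA completion timeout.
 */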
static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
				       int size)
{
	unsigned long timeout = 0;

	/* Time for the actual data transfer plus the HW-related CS change delay */
	timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;

	/* Add an extra second for scheduler-related activities */
	timeout += 1;

	/* Double the calculated timeout */
	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}

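/*
 * DMA path: the RX descriptor is prepared and issued before the TX one so
 * the receive side is armed before data starts flowing. Host mode waits
 * with a calculated timeout; target mode waits interruptibly and honours
 * target_abort.
 */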
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
				  struct fsl_lpspi_data *fsl_lpspi,
				  struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long time_left;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
	int ret;

	ret = fsl_lpspi_dma_configure(controller);
	if (ret)
		return ret;

	desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx)
		return -EINVAL;

	desc_rx->callback = fsl_lpspi_dma_rx_callback;
	desc_rx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_rx);
	reinit_completion(&fsl_lpspi->dma_rx_completion);
	dma_async_issue_pending(controller->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(controller->dma_tx);
		return -EINVAL;
	}

	desc_tx->callback = fsl_lpspi_dma_tx_callback;
	desc_tx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_tx);
	reinit_completion(&fsl_lpspi->dma_tx_completion);
	dma_async_issue_pending(controller->dma_tx);

	fsl_lpspi->target_aborted = false;

	if (!fsl_lpspi->is_target) {
		transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
							       transfer->len);

		/* Wait for eDMA to finish the data transfer. */
		time_left = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
							transfer_timeout);
		if (!time_left) {
			dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -ETIMEDOUT;
		}

		time_left = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
							transfer_timeout);
		if (!time_left) {
			dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -ETIMEDOUT;
		}
	} else {
		if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA TX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -EINTR;
		}

		if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA RX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -EINTR;
		}
	}

	fsl_lpspi_reset(fsl_lpspi);

	return 0;
}

static void fsl_lpspi_dma_exit(struct spi_controller *controller)
{
	if (controller->dma_rx) {
		dma_release_channel(controller->dma_rx);
		controller->dma_rx = NULL;
	}

	if (controller->dma_tx) {
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
	}
}

static int fsl_lpspi_dma_init(struct device *dev,
			      struct fsl_lpspi_data *fsl_lpspi,
			      struct spi_controller *controller)
{
	int ret;

	/* Prepare for TX DMA: */
	controller->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(controller->dma_tx)) {
		ret = PTR_ERR(controller->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		controller->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX DMA: */
	controller->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(controller->dma_rx)) {
		ret = PTR_ERR(controller->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		controller->dma_rx = NULL;
		goto err;
	}

	init_completion(&fsl_lpspi->dma_rx_completion);
	init_completion(&fsl_lpspi->dma_tx_completion);
	controller->can_dma = fsl_lpspi_can_dma;
	controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;

	return 0;
err:
	fsl_lpspi_dma_exit(controller);
	return ret;
}

static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->tx_buf = t->tx_buf;
	fsl_lpspi->rx_buf = t->rx_buf;
	fsl_lpspi->remain = t->len;

	reinit_completion(&fsl_lpspi->xfer_done);
	fsl_lpspi->target_aborted = false;

	fsl_lpspi_write_tx_fifo(fsl_lpspi);

	ret = fsl_lpspi_wait_for_completion(controller);
	if (ret)
		return ret;

	fsl_lpspi_reset(fsl_lpspi);

	return 0;
}

static int fsl_lpspi_transfer_one(struct spi_controller *controller,
				  struct spi_device *spi,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->is_first_byte = true;
	ret = fsl_lpspi_setup_transfer(controller, spi, t);
	if (ret < 0)
		return ret;

	t->effective_speed_hz = fsl_lpspi->config.effective_speed_hz;

	fsl_lpspi_set_cmd(fsl_lpspi);
	fsl_lpspi->is_first_byte = false;

	if (fsl_lpspi->usedma)
		ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
	else
		ret = fsl_lpspi_pio_transfer(controller, t);
	if (ret < 0)
		return ret;

	return 0;
}

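/*
 * Interrupt handler: drain the RX FIFO first, then either refill the TX
 * FIFO (TX-data interrupt), keep the frame-complete interrupt armed while
 * the module is still busy or data remains in the TX FIFO, or complete the
 * transfer once the frame has finished.
 */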
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
{
	u32 temp_SR, temp_IER;
	struct fsl_lpspi_data *fsl_lpspi = dev_id;

	temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
	fsl_lpspi_intctrl(fsl_lpspi, 0);
	temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);

	fsl_lpspi_read_rx_fifo(fsl_lpspi);

	if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
		fsl_lpspi_write_tx_fifo(fsl_lpspi);
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_MBF ||
	    readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		complete(&fsl_lpspi->xfer_done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

#ifdef CONFIG_PM
static int fsl_lpspi_runtime_resume(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;
	int ret;

	fsl_lpspi = spi_controller_get_devdata(controller);

	ret = clk_prepare_enable(fsl_lpspi->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
	if (ret) {
		clk_disable_unprepare(fsl_lpspi->clk_per);
		return ret;
	}

	return 0;
}

static int fsl_lpspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;

	fsl_lpspi = spi_controller_get_devdata(controller);

	clk_disable_unprepare(fsl_lpspi->clk_per);
	clk_disable_unprepare(fsl_lpspi->clk_ipg);

	return 0;
}
#endif

static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
{
	struct device *dev = fsl_lpspi->dev;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(dev);

	return 0;
}

static int fsl_lpspi_probe(struct platform_device *pdev)
{
	const struct fsl_lpspi_devtype_data *devtype_data;
	struct fsl_lpspi_data *fsl_lpspi;
	struct spi_controller *controller;
	struct resource *res;
	int ret, irq;
	u32 num_cs;
	u32 temp;
	bool is_target;

	devtype_data = of_device_get_match_data(&pdev->dev);
	if (!devtype_data)
		return -ENODEV;

	is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
	if (is_target)
		controller = devm_spi_alloc_target(&pdev->dev,
						   sizeof(struct fsl_lpspi_data));
	else
		controller = devm_spi_alloc_host(&pdev->dev,
						 sizeof(struct fsl_lpspi_data));

	if (!controller)
		return -ENOMEM;

	platform_set_drvdata(pdev, controller);

	fsl_lpspi = spi_controller_get_devdata(controller);
	fsl_lpspi->dev = &pdev->dev;
	fsl_lpspi->is_target = is_target;
	fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
						"fsl,spi-only-use-cs1-sel");
	fsl_lpspi->devtype_data = devtype_data;

	init_completion(&fsl_lpspi->xfer_done);

	fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(fsl_lpspi->base)) {
		ret = PTR_ERR(fsl_lpspi->base);
		return ret;
	}
	fsl_lpspi->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		return ret;
	}

	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, IRQF_NO_AUTOEN,
			       dev_name(&pdev->dev), fsl_lpspi);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		return ret;
	}

	fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(fsl_lpspi->clk_per)) {
		ret = PTR_ERR(fsl_lpspi->clk_per);
		return ret;
	}

	fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fsl_lpspi->clk_ipg)) {
		ret = PTR_ERR(fsl_lpspi->clk_ipg);
		return ret;
	}

	/* enable the clock */
	ret = fsl_lpspi_init_rpm(fsl_lpspi);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		goto out_pm_get;
	}

	temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
	fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
	fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
	if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
				 &num_cs)) {
		if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx93-spi"))
			num_cs = ((temp >> 16) & 0xf);
		else
			num_cs = 1;
	}

	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
	controller->transfer_one = fsl_lpspi_transfer_one;
	controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
	controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
	controller->dev.of_node = pdev->dev.of_node;
	controller->bus_num = pdev->id;
	controller->num_chipselect = num_cs;
	controller->target_abort = fsl_lpspi_target_abort;
	if (!fsl_lpspi->is_target)
		controller->use_gpio_descriptors = true;

	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
	if (ret == -EPROBE_DEFER)
		goto out_pm_get;
	if (ret < 0) {
		dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);
		enable_irq(irq);
	}

	ret = devm_spi_register_controller(&pdev->dev, controller);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
		goto free_dma;
	}

	pm_runtime_mark_last_busy(fsl_lpspi->dev);
	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;

free_dma:
	fsl_lpspi_dma_exit(controller);
out_pm_get:
	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_put_sync(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);

	return ret;
}

static void fsl_lpspi_remove(struct platform_device *pdev)
{
	struct spi_controller *controller = platform_get_drvdata(pdev);
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi_dma_exit(controller);

	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);
}

static int fsl_lpspi_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return pm_runtime_force_suspend(dev);
}

static int fsl_lpspi_resume(struct device *dev)
{
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "Error in resume: %d\n", ret);
		return ret;
	}

	pinctrl_pm_select_default_state(dev);

	return 0;
}

static const struct dev_pm_ops fsl_lpspi_pm_ops = {
	SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
			   fsl_lpspi_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
};

static struct platform_driver fsl_lpspi_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = fsl_lpspi_dt_ids,
		.pm = pm_ptr(&fsl_lpspi_pm_ops),
	},
	.probe = fsl_lpspi_probe,
	.remove = fsl_lpspi_remove,
};
module_platform_driver(fsl_lpspi_driver);

MODULE_DESCRIPTION("LPSPI Controller driver");
MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_LICENSE("GPL");