// SPDX-License-Identifier: GPL-2.0+
//
// Freescale i.MX7ULP LPSPI driver
//
// Copyright 2016 Freescale Semiconductor, Inc.
// Copyright 2018 NXP Semiconductors

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma/imx-dma.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>

#define DRIVER_NAME "fsl_lpspi"

#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */

/* The maximum number of bytes that eDMA can transfer at once. */
#define FSL_LPSPI_MAX_EDMA_BYTES  ((1 << 15) - 1)

/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID	0x0
#define IMX7ULP_PARAM	0x4
#define IMX7ULP_CR	0x10
#define IMX7ULP_SR	0x14
#define IMX7ULP_IER	0x18
#define IMX7ULP_DER	0x1c
#define IMX7ULP_CFGR0	0x20
#define IMX7ULP_CFGR1	0x24
#define IMX7ULP_DMR0	0x30
#define IMX7ULP_DMR1	0x34
#define IMX7ULP_CCR	0x40
#define IMX7ULP_FCR	0x58
#define IMX7ULP_FSR	0x5c
#define IMX7ULP_TCR	0x60
#define IMX7ULP_TDR	0x64
#define IMX7ULP_RSR	0x70
#define IMX7ULP_RDR	0x74

/* General control register field definitions */
#define CR_RRF		BIT(9)
#define CR_RTF		BIT(8)
#define CR_RST		BIT(1)
#define CR_MEN		BIT(0)
#define SR_MBF		BIT(24)
#define SR_TCF		BIT(10)
#define SR_FCF		BIT(9)
#define SR_RDF		BIT(1)
#define SR_TDF		BIT(0)
#define IER_TCIE	BIT(10)
#define IER_FCIE	BIT(9)
#define IER_RDIE	BIT(1)
#define IER_TDIE	BIT(0)
#define DER_RDDE	BIT(1)
#define DER_TDDE	BIT(0)
#define CFGR1_PCSCFG	BIT(27)
#define CFGR1_PINCFG	(BIT(24)|BIT(25))
#define CFGR1_PCSPOL	BIT(8)
#define CFGR1_NOSTALL	BIT(3)
#define CFGR1_HOST	BIT(0)
#define FSR_TXCOUNT	(0xFF)
#define RSR_RXEMPTY	BIT(1)
#define TCR_CPOL	BIT(31)
#define TCR_CPHA	BIT(30)
#define TCR_CONT	BIT(21)
#define TCR_CONTC	BIT(20)
#define TCR_RXMSK	BIT(19)
#define TCR_TXMSK	BIT(18)

struct fsl_lpspi_devtype_data {
	u8 prescale_max;
};

struct lpspi_config {
	u8 bpw;
	u8 chip_select;
	u8 prescale;
	u16 mode;
	u32 speed_hz;
	u32 effective_speed_hz;
};

struct fsl_lpspi_data {
	struct device *dev;
	void __iomem *base;
	unsigned long base_phys;
	struct clk *clk_ipg;
	struct clk *clk_per;
	bool is_target;
	bool is_only_cs1;
	bool is_first_byte;

	void *rx_buf;
	const void *tx_buf;
	void (*tx)(struct fsl_lpspi_data *);
	void (*rx)(struct fsl_lpspi_data *);

	u32 remain;
	u8 watermark;
	u8 txfifosize;
	u8 rxfifosize;

	struct lpspi_config config;
	struct completion xfer_done;

	bool target_aborted;

	/* DMA */
	bool usedma;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct fsl_lpspi_devtype_data *devtype_data;
};

/*
 * Whether erratum ERR051608 has been fixed on a given SoC:
 * https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
 */
static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
	.prescale_max = 1,
};

static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
	.prescale_max = 7,
};

static const struct of_device_id fsl_lpspi_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
	{ .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);

#define LPSPI_BUF_RX(type)						\
static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR);	\
									\
	if (fsl_lpspi->rx_buf) {					\
		*(type *)fsl_lpspi->rx_buf = val;			\
		fsl_lpspi->rx_buf += sizeof(type);			\
	}								\
}

#define LPSPI_BUF_TX(type)						\
static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	type val = 0;							\
									\
	if (fsl_lpspi->tx_buf) {					\
		val = *(type *)fsl_lpspi->tx_buf;			\
		fsl_lpspi->tx_buf += sizeof(type);			\
	}								\
									\
	fsl_lpspi->remain -= sizeof(type);				\
	writel(val, fsl_lpspi->base + IMX7ULP_TDR);			\
}

LPSPI_BUF_RX(u8)
LPSPI_BUF_TX(u8)
LPSPI_BUF_RX(u16)
LPSPI_BUF_TX(u16)
LPSPI_BUF_RX(u32)
LPSPI_BUF_TX(u32)

static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
			      unsigned int enable)
{
	writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}

static int fsl_lpspi_bytes_per_word(const int bpw)
{
	return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
}

static bool fsl_lpspi_can_dma(struct spi_controller *controller,
			      struct spi_device *spi,
			      struct spi_transfer *transfer)
{
	unsigned int bytes_per_word;

	if (!controller->dma_rx)
		return false;

	bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);

	switch (bytes_per_word) {
	case 1:
	case 2:
	case 4:
		break;
	default:
		return false;
	}

	return true;
}

static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		return ret;
	}

	return 0;
}

static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;
}

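/*
 * Fill the TX FIFO from the transfer buffer. Once the last word has been
 * queued (the FIFO did not fill up), clear TCR_CONTC in host mode so the
 * continuous chip-select sequence can end, and switch to the frame-complete
 * interrupt; otherwise re-arm the TX-data interrupt to refill the FIFO later.
 */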
static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	u8 txfifo_cnt;
	u32 temp;

	txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;

	while (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->remain)
			break;
		fsl_lpspi->tx(fsl_lpspi);
		txfifo_cnt++;
	}

	if (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->is_target) {
			temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
			temp &= ~TCR_CONTC;
			writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
		}

		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
	} else
		fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
}

static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
		fsl_lpspi->rx(fsl_lpspi);
}

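/*
 * Program the transmit command register: word size, SPI mode, chip select,
 * and (in host mode) the prescaler and continuous chip-select bits.
 */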
static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp = 0;

	temp |= fsl_lpspi->config.bpw - 1;
	temp |= (fsl_lpspi->config.mode & 0x3) << 30;
	temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
	if (!fsl_lpspi->is_target) {
		temp |= fsl_lpspi->config.prescale << 27;
		/*
		 * Setting TCR_CONT keeps SS asserted after the current
		 * transfer. For the first transfer, clear TCR_CONTC to
		 * assert SS. For subsequent transfers, set TCR_CONTC to
		 * keep SS asserted.
		 */
		if (!fsl_lpspi->usedma) {
			temp |= TCR_CONT;
			if (fsl_lpspi->is_first_byte)
				temp &= ~TCR_CONTC;
			else
				temp |= TCR_CONTC;
		}
	}
	writel(temp, fsl_lpspi->base + IMX7ULP_TCR);

	dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
}

static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

	if (!fsl_lpspi->usedma)
		temp = fsl_lpspi->watermark >> 1 |
		       (fsl_lpspi->watermark >> 1) << 16;
	else
		temp = fsl_lpspi->watermark >> 1;

	writel(temp, fsl_lpspi->base + IMX7ULP_FCR);

	dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
}

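/*
 * Pick the smallest prescaler that yields a valid SCK divider for the
 * requested speed. As a worked example with hypothetical numbers: a 24 MHz
 * per-clock and a 1 MHz transfer give div = 24, so prescale 0 and scldiv 22
 * are chosen (24 MHz / (22 + 2) = 1 MHz).
 */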
static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
{
	struct lpspi_config config = fsl_lpspi->config;
	unsigned int perclk_rate, div;
	u8 prescale_max;
	u8 prescale;
	int scldiv;

	perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
	prescale_max = fsl_lpspi->devtype_data->prescale_max;

	if (!config.speed_hz) {
		dev_err(fsl_lpspi->dev,
			"error: the transmission speed provided is 0!\n");
		return -EINVAL;
	}

	if (config.speed_hz > perclk_rate / 2) {
		dev_err(fsl_lpspi->dev,
			"per-clk should be at least two times the transfer speed\n");
		return -EINVAL;
	}

	div = DIV_ROUND_UP(perclk_rate, config.speed_hz);

	for (prescale = 0; prescale <= prescale_max; prescale++) {
		scldiv = div / (1 << prescale) - 2;
		if (scldiv >= 0 && scldiv < 256) {
			fsl_lpspi->config.prescale = prescale;
			break;
		}
	}

	if (scldiv < 0 || scldiv >= 256)
		return -EINVAL;

	writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
	       fsl_lpspi->base + IMX7ULP_CCR);

	fsl_lpspi->config.effective_speed_hz = perclk_rate / (scldiv + 2) *
					       (1 << prescale);

	dev_dbg(fsl_lpspi->dev, "perclk=%u, speed=%u, prescale=%u, scldiv=%d\n",
		perclk_rate, config.speed_hz, prescale, scldiv);

	return 0;
}

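/*
 * Configure both DMA channels to match the transfer word size, using
 * single-word bursts against the TDR/RDR FIFO registers.
 */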
static int fsl_lpspi_dma_configure(struct spi_controller *controller)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = 1;
	ret = dmaengine_slave_config(controller->dma_tx, &tx);
	if (ret) {
		dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
			ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = 1;
	ret = dmaengine_slave_config(controller->dma_rx, &rx);
	if (ret) {
		dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;
	int ret;

	if (!fsl_lpspi->is_target) {
		ret = fsl_lpspi_set_bitrate(fsl_lpspi);
		if (ret)
			return ret;
	}

	fsl_lpspi_set_watermark(fsl_lpspi);

	if (!fsl_lpspi->is_target)
		temp = CFGR1_HOST;
	else
		temp = CFGR1_PINCFG;
	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
		temp |= CFGR1_PCSPOL;
	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);

	temp = readl(fsl_lpspi->base + IMX7ULP_CR);
	temp |= CR_RRF | CR_RTF | CR_MEN;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	temp = 0;
	if (fsl_lpspi->usedma)
		temp = DER_TDDE | DER_RDDE;
	writel(temp, fsl_lpspi->base + IMX7ULP_DER);

	return 0;
}

static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
				    struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(spi->controller);

	if (t == NULL)
		return -EINVAL;

	fsl_lpspi->config.mode = spi->mode;
	fsl_lpspi->config.bpw = t->bits_per_word;
	fsl_lpspi->config.speed_hz = t->speed_hz;
	if (fsl_lpspi->is_only_cs1)
		fsl_lpspi->config.chip_select = 1;
	else
		fsl_lpspi->config.chip_select = spi_get_chipselect(spi, 0);

	if (!fsl_lpspi->config.speed_hz)
		fsl_lpspi->config.speed_hz = spi->max_speed_hz;
	if (!fsl_lpspi->config.bpw)
		fsl_lpspi->config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (fsl_lpspi->config.bpw <= 8) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
	} else if (fsl_lpspi->config.bpw <= 16) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
	} else {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
	}

	if (t->len <= fsl_lpspi->txfifosize)
		fsl_lpspi->watermark = t->len;
	else
		fsl_lpspi->watermark = fsl_lpspi->txfifosize;

	if (fsl_lpspi_can_dma(controller, spi, t))
		fsl_lpspi->usedma = true;
	else
		fsl_lpspi->usedma = false;

	return fsl_lpspi_config(fsl_lpspi);
}

static int fsl_lpspi_target_abort(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi->target_aborted = true;
	if (!fsl_lpspi->usedma)
		complete(&fsl_lpspi->xfer_done);
	else {
		complete(&fsl_lpspi->dma_tx_completion);
		complete(&fsl_lpspi->dma_rx_completion);
	}

	return 0;
}

static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	if (fsl_lpspi->is_target) {
		if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev, "interrupted\n");
			return -EINTR;
		}
	} else {
		if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
			dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

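/*
 * Quiesce the controller after a transfer: mask interrupts in PIO mode,
 * clear all status flags and flush both FIFOs.
 */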
static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

	if (!fsl_lpspi->usedma) {
		/* Disable all interrupts */
		fsl_lpspi_intctrl(fsl_lpspi, 0);
	}

	/* W1C for all flags in SR */
	temp = 0x3F << 8;
	writel(temp, fsl_lpspi->base + IMX7ULP_SR);

	/* Clear FIFO and disable module */
	temp = CR_RRF | CR_RTF;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	return 0;
}

static void fsl_lpspi_dma_rx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_rx_completion);
}

static void fsl_lpspi_dma_tx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_tx_completion);
}

static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
				       int size)
{
	unsigned long timeout = 0;

	/* Time for the actual data transfer plus the HW-related CS change delay */
	timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;

	/* Add an extra second for scheduler-related activities */
	timeout += 1;

	/* Double the calculated timeout */
	return secs_to_jiffies(2 * timeout);
}

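/*
 * Perform one transfer through eDMA: prepare RX and TX slave descriptors
 * from the scatterlists, start both channels, then wait for completion
 * (with a timeout in host mode, interruptibly in target mode).
 */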
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
				  struct fsl_lpspi_data *fsl_lpspi,
				  struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long time_left;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
	int ret;

	ret = fsl_lpspi_dma_configure(controller);
	if (ret)
		return ret;

	desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx)
		return -EINVAL;

	desc_rx->callback = fsl_lpspi_dma_rx_callback;
	desc_rx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_rx);
	reinit_completion(&fsl_lpspi->dma_rx_completion);
	dma_async_issue_pending(controller->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(controller->dma_tx);
		return -EINVAL;
	}

	desc_tx->callback = fsl_lpspi_dma_tx_callback;
	desc_tx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_tx);
	reinit_completion(&fsl_lpspi->dma_tx_completion);
	dma_async_issue_pending(controller->dma_tx);

	fsl_lpspi->target_aborted = false;

	if (!fsl_lpspi->is_target) {
		transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
							       transfer->len);

		/* Wait for eDMA to finish the data transfer. */
		time_left = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
							transfer_timeout);
		if (!time_left) {
			dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -ETIMEDOUT;
		}

		time_left = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
							transfer_timeout);
		if (!time_left) {
			dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -ETIMEDOUT;
		}
	} else {
		if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA TX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -EINTR;
		}

		if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA RX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -EINTR;
		}
	}

	fsl_lpspi_reset(fsl_lpspi);

	return 0;
}

static void fsl_lpspi_dma_exit(struct spi_controller *controller)
{
	if (controller->dma_rx) {
		dma_release_channel(controller->dma_rx);
		controller->dma_rx = NULL;
	}

	if (controller->dma_tx) {
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
	}
}

static int fsl_lpspi_dma_init(struct device *dev,
			      struct fsl_lpspi_data *fsl_lpspi,
			      struct spi_controller *controller)
{
	int ret;

	/* Prepare for TX DMA: */
	controller->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(controller->dma_tx)) {
		ret = PTR_ERR(controller->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		controller->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX DMA: */
	controller->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(controller->dma_rx)) {
		ret = PTR_ERR(controller->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		controller->dma_rx = NULL;
		goto err;
	}

	init_completion(&fsl_lpspi->dma_rx_completion);
	init_completion(&fsl_lpspi->dma_tx_completion);
	controller->can_dma = fsl_lpspi_can_dma;
	controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;

	return 0;
err:
	fsl_lpspi_dma_exit(controller);
	return ret;
}

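/*
 * Perform one transfer in PIO mode: prime the TX FIFO and let the
 * interrupt handler move the remaining data.
 */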
static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->tx_buf = t->tx_buf;
	fsl_lpspi->rx_buf = t->rx_buf;
	fsl_lpspi->remain = t->len;

	reinit_completion(&fsl_lpspi->xfer_done);
	fsl_lpspi->target_aborted = false;

	fsl_lpspi_write_tx_fifo(fsl_lpspi);

	ret = fsl_lpspi_wait_for_completion(controller);
	if (ret)
		return ret;

	fsl_lpspi_reset(fsl_lpspi);

	return 0;
}

static int fsl_lpspi_transfer_one(struct spi_controller *controller,
				  struct spi_device *spi,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->is_first_byte = true;
	ret = fsl_lpspi_setup_transfer(controller, spi, t);
	if (ret < 0)
		return ret;

	t->effective_speed_hz = fsl_lpspi->config.effective_speed_hz;

	fsl_lpspi_set_cmd(fsl_lpspi);
	fsl_lpspi->is_first_byte = false;

	if (fsl_lpspi->usedma)
		ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
	else
		ret = fsl_lpspi_pio_transfer(controller, t);
	if (ret < 0)
		return ret;

	return 0;
}

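/*
 * Interrupt handler: drain the RX FIFO, then either refill the TX FIFO if
 * data remains, or acknowledge frame completion and wake up the waiting
 * transfer.
 */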
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
{
	u32 temp_SR, temp_IER;
	struct fsl_lpspi_data *fsl_lpspi = dev_id;

	temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
	fsl_lpspi_intctrl(fsl_lpspi, 0);
	temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);

	fsl_lpspi_read_rx_fifo(fsl_lpspi);

	if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
		fsl_lpspi_write_tx_fifo(fsl_lpspi);
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_MBF ||
	    readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		complete(&fsl_lpspi->xfer_done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

#ifdef CONFIG_PM
static int fsl_lpspi_runtime_resume(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;
	int ret;

	fsl_lpspi = spi_controller_get_devdata(controller);

	ret = clk_prepare_enable(fsl_lpspi->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
	if (ret) {
		clk_disable_unprepare(fsl_lpspi->clk_per);
		return ret;
	}

	return 0;
}

static int fsl_lpspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;

	fsl_lpspi = spi_controller_get_devdata(controller);

	clk_disable_unprepare(fsl_lpspi->clk_per);
	clk_disable_unprepare(fsl_lpspi->clk_ipg);

	return 0;
}
#endif

static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
{
	struct device *dev = fsl_lpspi->dev;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(dev);

	return 0;
}

static int fsl_lpspi_probe(struct platform_device *pdev)
{
	const struct fsl_lpspi_devtype_data *devtype_data;
	struct fsl_lpspi_data *fsl_lpspi;
	struct spi_controller *controller;
	struct resource *res;
	int ret, irq;
	u32 num_cs;
	u32 temp;
	bool is_target;

	devtype_data = of_device_get_match_data(&pdev->dev);
	if (!devtype_data)
		return -ENODEV;

	is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
	if (is_target)
		controller = devm_spi_alloc_target(&pdev->dev,
						   sizeof(struct fsl_lpspi_data));
	else
		controller = devm_spi_alloc_host(&pdev->dev,
						 sizeof(struct fsl_lpspi_data));

	if (!controller)
		return -ENOMEM;

	platform_set_drvdata(pdev, controller);

	fsl_lpspi = spi_controller_get_devdata(controller);
	fsl_lpspi->dev = &pdev->dev;
	fsl_lpspi->is_target = is_target;
	fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
						"fsl,spi-only-use-cs1-sel");
	fsl_lpspi->devtype_data = devtype_data;

	init_completion(&fsl_lpspi->xfer_done);

	fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(fsl_lpspi->base)) {
		ret = PTR_ERR(fsl_lpspi->base);
		return ret;
	}
	fsl_lpspi->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		return ret;
	}

	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, IRQF_NO_AUTOEN,
			       dev_name(&pdev->dev), fsl_lpspi);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		return ret;
	}

	fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(fsl_lpspi->clk_per)) {
		ret = PTR_ERR(fsl_lpspi->clk_per);
		return ret;
	}

	fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fsl_lpspi->clk_ipg)) {
		ret = PTR_ERR(fsl_lpspi->clk_ipg);
		return ret;
	}

	/* enable the clock */
	ret = fsl_lpspi_init_rpm(fsl_lpspi);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		goto out_pm_get;
	}

	temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
	fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
	fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
	if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
				 &num_cs)) {
		if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx93-spi"))
			num_cs = ((temp >> 16) & 0xf);
		else
			num_cs = 1;
	}

	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
	controller->transfer_one = fsl_lpspi_transfer_one;
	controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
	controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
	controller->dev.of_node = pdev->dev.of_node;
	controller->bus_num = pdev->id;
	controller->num_chipselect = num_cs;
	controller->target_abort = fsl_lpspi_target_abort;
	if (!fsl_lpspi->is_target)
		controller->use_gpio_descriptors = true;

	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
	if (ret == -EPROBE_DEFER)
		goto out_pm_get;
	if (ret < 0) {
		dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);
		enable_irq(irq);
	}

	ret = devm_spi_register_controller(&pdev->dev, controller);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
		goto free_dma;
	}

	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;

free_dma:
	fsl_lpspi_dma_exit(controller);
out_pm_get:
	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_put_sync(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);

	return ret;
}

static void fsl_lpspi_remove(struct platform_device *pdev)
{
	struct spi_controller *controller = platform_get_drvdata(pdev);
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi_dma_exit(controller);

	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);
}

static int fsl_lpspi_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return pm_runtime_force_suspend(dev);
}

static int fsl_lpspi_resume(struct device *dev)
{
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "Error in resume: %d\n", ret);
		return ret;
	}

	pinctrl_pm_select_default_state(dev);

	return 0;
}

static const struct dev_pm_ops fsl_lpspi_pm_ops = {
	SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
			   fsl_lpspi_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
};

static struct platform_driver fsl_lpspi_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = fsl_lpspi_dt_ids,
		.pm = pm_ptr(&fsl_lpspi_pm_ops),
	},
	.probe = fsl_lpspi_probe,
	.remove = fsl_lpspi_remove,
};
module_platform_driver(fsl_lpspi_driver);

MODULE_DESCRIPTION("LPSPI Controller driver");
MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_LICENSE("GPL");